From db7f4b08480f2a8941b22a5fa958cf5b9fff32b9 Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Wed, 23 Mar 2022 07:26:18 -0500
Subject: [PATCH 01/73] [Upgrade] Lucene 9.1 release (#2560)
Upgrades to the official 9.1 release
Signed-off-by: Nicholas Walter Knize
---
buildSrc/version.properties | 2 +-
.../lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 | 1 +
.../lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 | 1 +
...lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 | 1 +
.../lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 | 1 +
...lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 | 1 +
.../lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 | 1 +
.../lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../licenses/lucene-analysis-stempel-9.1.0.jar.sha1 | 1 +
...cene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
.../licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 | 1 +
.../lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-analysis-common-9.1.0.jar.sha1 | 1 +
.../lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 | 1 +
server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-core-9.1.0.jar.sha1 | 1 +
.../lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-grouping-9.1.0.jar.sha1 | 1 +
.../lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-highlighter-9.1.0.jar.sha1 | 1 +
server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-join-9.1.0.jar.sha1 | 1 +
.../licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-memory-9.1.0.jar.sha1 | 1 +
server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-misc-9.1.0.jar.sha1 | 1 +
.../licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-queries-9.1.0.jar.sha1 | 1 +
.../lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-queryparser-9.1.0.jar.sha1 | 1 +
.../licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-sandbox-9.1.0.jar.sha1 | 1 +
.../lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 | 1 +
.../lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-spatial3d-9.1.0.jar.sha1 | 1 +
.../licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1 | 1 -
server/licenses/lucene-suggest-9.1.0.jar.sha1 | 1 +
45 files changed, 23 insertions(+), 23 deletions(-)
delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1
delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1
delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1
delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1
delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1
delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1
delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1
delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-analysis-common-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-backward-codecs-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-core-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-grouping-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-highlighter-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-join-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-memory-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-misc-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-queries-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-queryparser-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-sandbox-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-spatial-extras-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-spatial3d-9.1.0.jar.sha1
delete mode 100644 server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1
create mode 100644 server/licenses/lucene-suggest-9.1.0.jar.sha1
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index b5e14cd24bd93..34934d63a8975 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
opensearch = 2.0.0
-lucene = 9.1.0-snapshot-ea989fe8f30
+lucene = 9.1.0
bundled_jdk_vendor = adoptium
bundled_jdk = 17.0.2+8
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index fb85ff4827c36..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c7317bb4e72b820a516e0c8a90beac5acc82c2e2
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..c825e197188fc
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-9.1.0.jar.sha1
@@ -0,0 +1 @@
+2711abb758d101fc738c35a6867ee7559da5308b
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 2f0a6ad50e337..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-77930f802430648980eded22ca6ed47fedaeaba4
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..b7733cfa9a00a
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.1.0.jar.sha1
@@ -0,0 +1 @@
+e9b429da553560fa0c363ffc04c774f957c56e14
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index a0d112dd733ab..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c66f568fa9138c6ab6f3abf1efbfab3c7b5991d4
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..f5b818a206e7a
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.1.0.jar.sha1
@@ -0,0 +1 @@
+b247f8a877237b4663e4ab7d86fae21c68a58ea5
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index a3f939bfe9e05..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e8c47600ea859b999a5f5647341b0350b03dafcd
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..4d22255d10316
--- /dev/null
+++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.1.0.jar.sha1
@@ -0,0 +1 @@
+30e24b42fb0440911e702a531f4373bf397eb8c6
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index e2006546433fd..0000000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6f0f5c71052beee26e4ce99e1147ce406234f417
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..a0607e6158cdd
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.1.0.jar.sha1
@@ -0,0 +1 @@
+18a321d93836ea2856a5302d192e9dc99c647c6e
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index e675c5774f5a4..0000000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-32aad8b8491df3c9862e7fe75e98bccdb6a25bda
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..bff959139a86c
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.1.0.jar.sha1
@@ -0,0 +1 @@
+41c847f39a15bb8495be8c9d8a098974be15f74b
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 053f5c97d65dc..0000000000000
--- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ef546cfaaf727d93c4e86ddc7f77b525af135623
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..39d25d7872ea9
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.1.0.jar.sha1
@@ -0,0 +1 @@
+ee7995231b181aa0a01f5aef8775562e269f5ef7
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index e5a2a0b0b4ab3..0000000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21c3511469f67019804e41a8d83ffc5c36de6479
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..9f07f122205d9
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.1.0.jar.sha1
@@ -0,0 +1 @@
+575c458431396baa7f01a546173807f27b12a087
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 6ef0f1eafc345..0000000000000
--- a/server/licenses/lucene-analysis-common-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bafd720282a371efe7b0e7238f9dee7e2ad3a586
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.1.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..4d2a9cf9451cc
--- /dev/null
+++ b/server/licenses/lucene-analysis-common-9.1.0.jar.sha1
@@ -0,0 +1 @@
+240e3997fb139ff001e022124c89b686b5a8498d
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 017333945a866..0000000000000
--- a/server/licenses/lucene-backward-codecs-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-597fe288a252a14c0876451c97afee2b4529f85a
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..b6df56db28cd6
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-9.1.0.jar.sha1
@@ -0,0 +1 @@
+de23bdacb09e8b39cbe876ff79c7a5b2ecc1faa6
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index a2ba0f0ffa43c..0000000000000
--- a/server/licenses/lucene-core-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-80cd2fff33ced89924771c7079d42bf82f1266f6
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.1.0.jar.sha1 b/server/licenses/lucene-core-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..45e7ae47dae3e
--- /dev/null
+++ b/server/licenses/lucene-core-9.1.0.jar.sha1
@@ -0,0 +1 @@
+0375603f1dacd8266526404faf0088a2ac8ec2ff
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index ac0c1be0f952b..0000000000000
--- a/server/licenses/lucene-grouping-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7059f47668a2942c60ad03b1d58eca8dcb010e4e
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.1.0.jar.sha1 b/server/licenses/lucene-grouping-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..be423fdde04f7
--- /dev/null
+++ b/server/licenses/lucene-grouping-9.1.0.jar.sha1
@@ -0,0 +1 @@
+703308505e62fa7dcb0bf64fdb6d95d335941bdc
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index fa08ed63f7c44..0000000000000
--- a/server/licenses/lucene-highlighter-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3c841ca23eb08a939fa49ba4af249c3b6d849c42
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.1.0.jar.sha1 b/server/licenses/lucene-highlighter-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..c130c27ed4c37
--- /dev/null
+++ b/server/licenses/lucene-highlighter-9.1.0.jar.sha1
@@ -0,0 +1 @@
+7f1925f6ef985000399a277ca17b8f67d3056838
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 2a3e2a9107a60..0000000000000
--- a/server/licenses/lucene-join-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4984e041ae68f5939c01e41b2c9648ae2c021340
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.1.0.jar.sha1 b/server/licenses/lucene-join-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..b678051ddaf26
--- /dev/null
+++ b/server/licenses/lucene-join-9.1.0.jar.sha1
@@ -0,0 +1 @@
+e7d39da8e623c99ee8da8bcc0185b2d908aca4b3
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index eefd08d222ef8..0000000000000
--- a/server/licenses/lucene-memory-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fead9467ce65469579168eb0f47e014fdb3c63d9
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.1.0.jar.sha1 b/server/licenses/lucene-memory-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..a07b052e9c332
--- /dev/null
+++ b/server/licenses/lucene-memory-9.1.0.jar.sha1
@@ -0,0 +1 @@
+209166fd48dae3261ccf26990fe600332b8fb373
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 226f97cf6f3bc..0000000000000
--- a/server/licenses/lucene-misc-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d98ab1966b8ca53b70fe071281bcea27d602ec30
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.1.0.jar.sha1 b/server/licenses/lucene-misc-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..8627e481c6214
--- /dev/null
+++ b/server/licenses/lucene-misc-9.1.0.jar.sha1
@@ -0,0 +1 @@
+905d93b6389060cf4b0cb464ffa8fa2db81b60e7
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index c151e6b76e21a..0000000000000
--- a/server/licenses/lucene-queries-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-895e27127ae55031e35e152da8be941bd55f7f6a
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.1.0.jar.sha1 b/server/licenses/lucene-queries-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..9e81da7ca5c15
--- /dev/null
+++ b/server/licenses/lucene-queries-9.1.0.jar.sha1
@@ -0,0 +1 @@
+c50fc971573910ea239ee6f275e9257b6b6bdd48
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index b73b7152aed05..0000000000000
--- a/server/licenses/lucene-queryparser-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1433392237ea01ef35f4e2ffc52f496b0669624c
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.1.0.jar.sha1 b/server/licenses/lucene-queryparser-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..fb04adf2051d0
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.1.0.jar.sha1
@@ -0,0 +1 @@
+383eb69b12f9d9c98c44237155f50c870c9a34b9
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index d441dd2f8cb31..0000000000000
--- a/server/licenses/lucene-sandbox-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b0688963ca8288f5a3e47ca6e4b38bc2fde780e7
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.1.0.jar.sha1 b/server/licenses/lucene-sandbox-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..429a84de46f3c
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.1.0.jar.sha1
@@ -0,0 +1 @@
+0c728684e750a63f881998fbe27afd897f739762
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 5ffa78a6e7d87..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-206e8918a726710c8a6fb927e59adf26c6ad5bed
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..7078cbc05fff7
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.1.0.jar.sha1
@@ -0,0 +1 @@
+94d7d107c399cd11d407b94fa62f5677fe86f63b
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 8c4bb08303c34..0000000000000
--- a/server/licenses/lucene-spatial3d-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3d1e26c37b45bdf2ef598d16468220ab33983a8f
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.1.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..604e8ed054ac1
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.1.0.jar.sha1
@@ -0,0 +1 @@
+7717b300bc14dfa9eb4b7d5970d8e25a60010e64
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1 b/server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1
deleted file mode 100644
index 3c8d9b87da0e5..0000000000000
--- a/server/licenses/lucene-suggest-9.1.0-snapshot-ea989fe8f30.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-69ab05339614766c732fef7c037cc5b676bd40dc
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.1.0.jar.sha1 b/server/licenses/lucene-suggest-9.1.0.jar.sha1
new file mode 100644
index 0000000000000..4562a19706634
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.1.0.jar.sha1
@@ -0,0 +1 @@
+957fca507eba94dbc3ef0d02377839be49bbe619
\ No newline at end of file
From c1d5491baf02b5ea0223d3075a5e1fc288d54bcf Mon Sep 17 00:00:00 2001
From: Suraj Singh <79435743+dreamer-89@users.noreply.github.com>
Date: Wed, 23 Mar 2022 06:15:17 -0700
Subject: [PATCH 02/73] [Type removal] Remove deprecation warning on use of
_type in doc scripts (#2564)
Signed-off-by: Suraj Singh
---
.../java/org/opensearch/search/lookup/LeafDocLookup.java | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java
index 82daa94d92146..716476101ac48 100644
--- a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java
+++ b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java
@@ -33,7 +33,6 @@
import org.apache.lucene.index.LeafReaderContext;
import org.opensearch.ExceptionsHelper;
-import org.opensearch.common.logging.DeprecationLogger;
import org.opensearch.index.fielddata.IndexFieldData;
import org.opensearch.index.fielddata.ScriptDocValues;
import org.opensearch.index.mapper.MappedFieldType;
@@ -50,10 +49,6 @@
public class LeafDocLookup implements Map<String, ScriptDocValues<?>> {
- private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(LeafDocLookup.class);
- static final String TYPES_DEPRECATION_KEY = "type-field-doc-lookup";
- static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Looking up doc types [_type] in scripts is deprecated.";
-
private final Map<String, ScriptDocValues<?>> localCacheFieldData = new HashMap<>(4);
private final MapperService mapperService;
@@ -78,10 +73,6 @@ public void setDocument(int docId) {
@Override
public ScriptDocValues<?> get(Object key) {
- // deprecate _type
- if ("_type".equals(key)) {
- DEPRECATION_LOGGER.deprecate(TYPES_DEPRECATION_KEY, TYPES_DEPRECATION_MESSAGE);
- }
// assume its a string...
String fieldName = key.toString();
ScriptDocValues<?> scriptValues = localCacheFieldData.get(fieldName);
From 511ac884fe49e6d97738d42a67ffa1819396ca2a Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Wed, 23 Mar 2022 13:26:36 -0500
Subject: [PATCH 03/73] [Bug] Fix InboundDecoder version compat check (#2570)
Change the InboundDecoder ensureVersionCompatibility check to use onOrAfter
V_2_0_0 instead of an explicit version check. This way bug-fix and minor
versions will correctly handshake in a mixed 1.x cluster.
Signed-off-by: Nicholas Walter Knize
---
.../src/main/java/org/opensearch/transport/InboundDecoder.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/server/src/main/java/org/opensearch/transport/InboundDecoder.java b/server/src/main/java/org/opensearch/transport/InboundDecoder.java
index bd1d384fd37da..9cfb4a79161e7 100644
--- a/server/src/main/java/org/opensearch/transport/InboundDecoder.java
+++ b/server/src/main/java/org/opensearch/transport/InboundDecoder.java
@@ -217,7 +217,7 @@ static IllegalStateException ensureVersionCompatibility(Version remoteVersion, V
// handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility
// once the connection is established
final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion;
- if ((currentVersion.equals(Version.V_2_0_0) && remoteVersion.equals(Version.fromId(6079999))) == false
+ if ((currentVersion.onOrAfter(Version.V_2_0_0) && remoteVersion.equals(Version.fromId(6079999))) == false
&& remoteVersion.isCompatible(compatibilityVersion) == false) {
final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion();
String msg = "Received " + (isHandshake ? "handshake " : "") + "message from unsupported version: [";
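Note (illustration only, not part of the patch): a minimal sketch of the behavior the hunk above enables, assuming the org.opensearch.Version API already used in the diff (fromId, equals, onOrAfter); the 2.0.1 release below is hypothetical and used purely for illustration.

    // A 1.x peer advertises the synthetic handshake version id 6079999,
    // while the local node runs a hypothetical 2.0.1 bug-fix release.
    Version remoteVersion = Version.fromId(6079999);
    Version currentVersion = Version.fromString("2.0.1");

    // Old check: only a node running exactly 2.0.0 tolerated the synthetic handshake version.
    boolean toleratedBefore = currentVersion.equals(Version.V_2_0_0);    // false -> handshake rejected
    // New check: 2.0.0 and later tolerate it, so the mixed 1.x/2.x handshake succeeds.
    boolean toleratedAfter = currentVersion.onOrAfter(Version.V_2_0_0);  // true -> handshake accepted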
From 2e9f89a89efb2b44adc783d378e433bce79273c0 Mon Sep 17 00:00:00 2001
From: Vacha Shah
Date: Wed, 23 Mar 2022 12:56:11 -0700
Subject: [PATCH 04/73] Adding signoff option for version workflow PR (#2572)
Signed-off-by: Vacha Shah
---
.github/workflows/version.yml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index b42e7c4f2f317..030689642677a 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -59,6 +59,7 @@ jobs:
base: ${{ env.BASE }}
branch: 'create-pull-request/patch-${{ env.BASE }}'
commit-message: Incremented version to ${{ env.NEXT_VERSION }}
+ signoff: true
delete-branch: true
title: '[AUTO] Incremented version to ${{ env.NEXT_VERSION }}.'
body: |
@@ -83,6 +84,7 @@ jobs:
base: ${{ env.BASE_X }}
branch: 'create-pull-request/patch-${{ env.BASE_X }}'
commit-message: Added bwc version ${{ env.NEXT_VERSION }}
+ signoff: true
delete-branch: true
title: '[AUTO] [${{ env.BASE_X }}] Added bwc version ${{ env.NEXT_VERSION }}.'
body: |
@@ -107,6 +109,7 @@ jobs:
base: main
branch: 'create-pull-request/patch-main'
commit-message: Added bwc version ${{ env.NEXT_VERSION }}
+ signoff: true
delete-branch: true
title: '[AUTO] [main] Added bwc version ${{ env.NEXT_VERSION }}.'
body: |
From b6ca0d1f78e765f509d6b52af8f488548fdddf94 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Thu, 24 Mar 2022 14:20:31 -0400
Subject: [PATCH 05/73] Concurrent Searching (Experimental) (#1500)
* Concurrent Searching (Experimental)
Signed-off-by: Andriy Redko
* Addressing code review comments
Signed-off-by: Andriy Redko
---
.../plugins/concurrent-search/build.gradle | 42 +
.../search/ConcurrentSegmentSearchPlugin.java | 53 +
.../org/opensearch/search/package-info.java | 12 +
.../query/ConcurrentQueryPhaseSearcher.java | 119 ++
.../opensearch/search/query/package-info.java | 12 +
.../profile/query/QueryProfilerTests.java | 316 ++++
.../search/query/QueryPhaseTests.java | 1335 +++++++++++++++++
.../search/query/QueryProfilePhaseTests.java | 1182 +++++++++++++++
.../common/lucene/MinimumScoreCollector.java | 4 +
.../lucene/search/FilteredCollector.java | 4 +
.../search/DefaultSearchContext.java | 8 +-
.../search/aggregations/AggregationPhase.java | 42 +-
.../search/internal/ContextIndexSearcher.java | 33 +-
.../internal/FilteredSearchContext.java | 6 +-
.../search/internal/SearchContext.java | 6 +-
.../opensearch/search/profile/Profilers.java | 2 +-
.../InternalProfileCollectorManager.java | 89 ++
.../query/ProfileCollectorManager.java | 17 +
.../query/EarlyTerminatingCollector.java | 4 +
.../EarlyTerminatingCollectorManager.java | 74 +
.../query/EarlyTerminatingListener.java | 22 +
.../query/FilteredCollectorManager.java | 45 +
.../search/query/MinimumCollectorManager.java | 44 +
.../search/query/MultiCollectorWrapper.java | 58 +
.../search/query/QueryCollectorContext.java | 71 +-
.../query/QueryCollectorManagerContext.java | 99 ++
.../opensearch/search/query/QueryPhase.java | 4 +-
.../search/query/ReduceableSearchResult.java | 23 +
.../search/query/TopDocsCollectorContext.java | 338 ++++-
.../query/TotalHitCountCollectorManager.java | 106 ++
.../search/DefaultSearchContextTests.java | 38 +-
.../search/SearchCancellationTests.java | 9 +-
.../internal/ContextIndexSearcherTests.java | 3 +-
.../profile/query/QueryProfilerTests.java | 32 +-
.../search/query/QueryPhaseTests.java | 285 +++-
.../search/query/QueryProfilePhaseTests.java | 1158 ++++++++++++++
.../aggregations/AggregatorTestCase.java | 3 +-
.../opensearch/test/TestSearchContext.java | 36 +-
38 files changed, 5563 insertions(+), 171 deletions(-)
create mode 100644 sandbox/plugins/concurrent-search/build.gradle
create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java
create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java
create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java
create mode 100644 sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java
create mode 100644 sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
create mode 100644 sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java
create mode 100644 sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
create mode 100644 server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java
create mode 100644 server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java
create mode 100644 server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java
create mode 100644 server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java
create mode 100644 server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java
create mode 100644 server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java
create mode 100644 server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java
create mode 100644 server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java
create mode 100644 server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java
create mode 100644 server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java
create mode 100644 server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
diff --git a/sandbox/plugins/concurrent-search/build.gradle b/sandbox/plugins/concurrent-search/build.gradle
new file mode 100644
index 0000000000000..acc3cb5092cd8
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/build.gradle
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+apply plugin: 'opensearch.opensearchplugin'
+apply plugin: 'opensearch.yaml-rest-test'
+
+opensearchplugin {
+ name 'concurrent-search'
+ description 'The experimental plugin which implements concurrent search over Apache Lucene segments'
+ classname 'org.opensearch.search.ConcurrentSegmentSearchPlugin'
+ licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+ noticeFile rootProject.file('NOTICE.txt')
+}
+
+yamlRestTest.enabled = false;
+testingConventions.enabled = false;
\ No newline at end of file
diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java
new file mode 100644
index 0000000000000..da999e40f0f07
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/ConcurrentSegmentSearchPlugin.java
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search;
+
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.SearchPlugin;
+import org.opensearch.search.query.ConcurrentQueryPhaseSearcher;
+import org.opensearch.search.query.QueryPhaseSearcher;
+import org.opensearch.threadpool.ExecutorBuilder;
+import org.opensearch.threadpool.FixedExecutorBuilder;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * The experimental plugin which implements the concurrent search over Apache Lucene segments.
+ */
+public class ConcurrentSegmentSearchPlugin extends Plugin implements SearchPlugin {
+ private static final String INDEX_SEARCHER = "index_searcher";
+
+ /**
+ * Default constructor
+ */
+ public ConcurrentSegmentSearchPlugin() {}
+
+ @Override
+ public Optional<QueryPhaseSearcher> getQueryPhaseSearcher() {
+ return Optional.of(new ConcurrentQueryPhaseSearcher());
+ }
+
+ @Override
+ public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
+ final int allocatedProcessors = OpenSearchExecutors.allocatedProcessors(settings);
+ return Collections.singletonList(
+ new FixedExecutorBuilder(settings, INDEX_SEARCHER, allocatedProcessors, 1000, "thread_pool." + INDEX_SEARCHER)
+ );
+ }
+
+ @Override
+ public Optional getIndexSearcherExecutorProvider() {
+ return Optional.of((ThreadPool threadPool) -> threadPool.executor(INDEX_SEARCHER));
+ }
+}
diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java
new file mode 100644
index 0000000000000..041f914fab7d7
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The implementation of the experimental plugin which implements the concurrent search over Apache Lucene segments.
+ */
+package org.opensearch.search;
diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java
new file mode 100644
index 0000000000000..65f339838a40b
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java
@@ -0,0 +1,119 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import static org.opensearch.search.query.TopDocsCollectorContext.createTopDocsCollectorContext;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+import org.apache.lucene.search.Query;
+import org.opensearch.search.internal.ContextIndexSearcher;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.search.profile.query.ProfileCollectorManager;
+import org.opensearch.search.query.QueryPhase.DefaultQueryPhaseSearcher;
+import org.opensearch.search.query.QueryPhase.TimeExceededException;
+
+/**
+ * The implementation of the {@link QueryPhaseSearcher} which attempts to use concurrent
+ * search of Apache Lucene segments if it has been enabled.
+ */
+public class ConcurrentQueryPhaseSearcher extends DefaultQueryPhaseSearcher {
+ private static final Logger LOGGER = LogManager.getLogger(ConcurrentQueryPhaseSearcher.class);
+
+ /**
+ * Default constructor
+ */
+ public ConcurrentQueryPhaseSearcher() {}
+
+ @Override
+ protected boolean searchWithCollector(
+ SearchContext searchContext,
+ ContextIndexSearcher searcher,
+ Query query,
+ LinkedList<QueryCollectorContext> collectors,
+ boolean hasFilterCollector,
+ boolean hasTimeout
+ ) throws IOException {
+ boolean couldUseConcurrentSegmentSearch = allowConcurrentSegmentSearch(searcher);
+
+ // TODO: support aggregations
+ if (searchContext.aggregations() != null) {
+ couldUseConcurrentSegmentSearch = false;
+ LOGGER.debug("Unable to use concurrent search over index segments (experimental): aggregations are present");
+ }
+
+ if (couldUseConcurrentSegmentSearch) {
+ LOGGER.debug("Using concurrent search over index segments (experimental)");
+ return searchWithCollectorManager(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout);
+ } else {
+ return super.searchWithCollector(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout);
+ }
+ }
+
+ private static boolean searchWithCollectorManager(
+ SearchContext searchContext,
+ ContextIndexSearcher searcher,
+ Query query,
+ LinkedList<QueryCollectorContext> collectorContexts,
+ boolean hasFilterCollector,
+ boolean timeoutSet
+ ) throws IOException {
+ // create the top docs collector last when the other collectors are known
+ final TopDocsCollectorContext topDocsFactory = createTopDocsCollectorContext(searchContext, hasFilterCollector);
+ // add the top docs collector, the first collector context in the chain
+ collectorContexts.addFirst(topDocsFactory);
+
+ final QuerySearchResult queryResult = searchContext.queryResult();
+ final CollectorManager<?, ReduceableSearchResult> collectorManager;
+
+ // TODO: support aggregations in concurrent segment search flow
+ if (searchContext.aggregations() != null) {
+ throw new UnsupportedOperationException("The concurrent segment search does not support aggregations yet");
+ }
+
+ if (searchContext.getProfilers() != null) {
+ final ProfileCollectorManager<? extends Collector, ReduceableSearchResult> profileCollectorManager =
+ QueryCollectorManagerContext.createQueryCollectorManagerWithProfiler(collectorContexts);
+ searchContext.getProfilers().getCurrentQueryProfiler().setCollector(profileCollectorManager);
+ collectorManager = profileCollectorManager;
+ } else {
+ // Create multi collector manager instance
+ collectorManager = QueryCollectorManagerContext.createMultiCollectorManager(collectorContexts);
+ }
+
+ try {
+ final ReduceableSearchResult result = searcher.search(query, collectorManager);
+ result.reduce(queryResult);
+ } catch (EarlyTerminatingCollector.EarlyTerminationException e) {
+ queryResult.terminatedEarly(true);
+ } catch (TimeExceededException e) {
+ assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set";
+ if (searchContext.request().allowPartialSearchResults() == false) {
+ // Can't rethrow TimeExceededException because not serializable
+ throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Time exceeded");
+ }
+ queryResult.searchTimedOut(true);
+ }
+ if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER && queryResult.terminatedEarly() == null) {
+ queryResult.terminatedEarly(false);
+ }
+
+ return topDocsFactory.shouldRescore();
+ }
+
+ private static boolean allowConcurrentSegmentSearch(final ContextIndexSearcher searcher) {
+ return (searcher.getExecutor() != null);
+ }
+
+}
diff --git a/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java
new file mode 100644
index 0000000000000..0f98ae7682a84
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/src/main/java/org/opensearch/search/query/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * {@link org.opensearch.search.query.QueryPhaseSearcher} implementation for concurrent search
+ */
+package org.opensearch.search.query;
diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
new file mode 100644
index 0000000000000..51cb3c8c0cddc
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
@@ -0,0 +1,316 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LRUQueryCache;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.QueryVisitor;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.ScorerSupplier;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TotalHitCountCollector;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.apache.lucene.tests.search.RandomApproximationQuery;
+import org.apache.lucene.tests.util.TestUtil;
+import org.opensearch.core.internal.io.IOUtils;
+import org.opensearch.search.internal.ContextIndexSearcher;
+import org.opensearch.search.profile.ProfileResult;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class QueryProfilerTests extends OpenSearchTestCase {
+
+ private Directory dir;
+ private IndexReader reader;
+ private ContextIndexSearcher searcher;
+ private ExecutorService executor;
+
+ @ParametersFactory
+ public static Collection<Object[]> concurrency() {
+ return Arrays.asList(new Integer[] { 0 }, new Integer[] { 5 });
+ }
+
+ public QueryProfilerTests(int concurrency) {
+ this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null;
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+
+ dir = newDirectory();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+ final int numDocs = TestUtil.nextInt(random(), 1, 20);
+ for (int i = 0; i < numDocs; ++i) {
+ final int numHoles = random().nextInt(5);
+ for (int j = 0; j < numHoles; ++j) {
+ w.addDocument(new Document());
+ }
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ w.addDocument(doc);
+ }
+ reader = w.getReader();
+ w.close();
+ searcher = new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ ALWAYS_CACHE_POLICY,
+ true,
+ executor
+ );
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+
+ LRUQueryCache cache = (LRUQueryCache) searcher.getQueryCache();
+ assertThat(cache.getHitCount(), equalTo(0L));
+ assertThat(cache.getCacheCount(), equalTo(0L));
+ assertThat(cache.getTotalCount(), equalTo(cache.getMissCount()));
+ assertThat(cache.getCacheSize(), equalTo(0L));
+
+ if (executor != null) {
+ ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
+ }
+
+ IOUtils.close(reader, dir);
+ dir = null;
+ reader = null;
+ searcher = null;
+ }
+
+ public void testBasic() throws IOException {
+ QueryProfiler profiler = new QueryProfiler(executor != null);
+ searcher.setProfiler(profiler);
+ Query query = new TermQuery(new Term("foo", "bar"));
+ searcher.search(query, 1);
+ List<ProfileResult> results = profiler.getTree();
+ assertEquals(1, results.size());
+ Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
+ assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.ADVANCE.toString()), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.SCORE.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L));
+
+ assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L));
+
+ long rewriteTime = profiler.getRewriteTime();
+ assertThat(rewriteTime, greaterThan(0L));
+ }
+
+ public void testNoScoring() throws IOException {
+ QueryProfiler profiler = new QueryProfiler(executor != null);
+ searcher.setProfiler(profiler);
+ Query query = new TermQuery(new Term("foo", "bar"));
+ searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed
+ List<ProfileResult> results = profiler.getTree();
+ assertEquals(1, results.size());
+ Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
+ assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.ADVANCE.toString()), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L));
+
+ assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L));
+
+ long rewriteTime = profiler.getRewriteTime();
+ assertThat(rewriteTime, greaterThan(0L));
+ }
+
+ public void testUseIndexStats() throws IOException {
+ QueryProfiler profiler = new QueryProfiler(executor != null);
+ searcher.setProfiler(profiler);
+ Query query = new TermQuery(new Term("foo", "bar"));
+ searcher.count(query); // will use index stats
+ List<ProfileResult> results = profiler.getTree();
+ assertEquals(1, results.size());
+ ProfileResult result = results.get(0);
+ assertEquals(0, (long) result.getTimeBreakdown().get("build_scorer_count"));
+
+ long rewriteTime = profiler.getRewriteTime();
+ assertThat(rewriteTime, greaterThan(0L));
+ }
+
+ public void testApproximations() throws IOException {
+ QueryProfiler profiler = new QueryProfiler(executor != null);
+ searcher.setProfiler(profiler);
+ Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random());
+ searcher.count(query);
+ List<ProfileResult> results = profiler.getTree();
+ assertEquals(1, results.size());
+ Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
+ assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.ADVANCE.toString()), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.MATCH.toString()), greaterThan(0L));
+
+ assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L));
+ assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L));
+ assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), greaterThan(0L));
+
+ long rewriteTime = profiler.getRewriteTime();
+ assertThat(rewriteTime, greaterThan(0L));
+ }
+
+ public void testCollector() throws IOException {
+ TotalHitCountCollector collector = new TotalHitCountCollector();
+ ProfileCollector profileCollector = new ProfileCollector(collector);
+ assertEquals(0, profileCollector.getTime());
+ final LeafCollector leafCollector = profileCollector.getLeafCollector(reader.leaves().get(0));
+ assertThat(profileCollector.getTime(), greaterThan(0L));
+ long time = profileCollector.getTime();
+ leafCollector.setScorer(null);
+ assertThat(profileCollector.getTime(), greaterThan(time));
+ time = profileCollector.getTime();
+ leafCollector.collect(0);
+ assertThat(profileCollector.getTime(), greaterThan(time));
+ }
+
+ private static class DummyQuery extends Query {
+
+ @Override
+ public String toString(String field) {
+ return getClass().getSimpleName();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return this == obj;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ @Override
+ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+ return new Weight(this) {
+ @Override
+ public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Scorer scorer(LeafReaderContext context) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+ return new ScorerSupplier() {
+
+ @Override
+ public Scorer get(long loadCost) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long cost() {
+ return 42;
+ }
+ };
+ }
+
+ @Override
+ public boolean isCacheable(LeafReaderContext ctx) {
+ return true;
+ }
+ };
+ }
+
+ @Override
+ public void visit(QueryVisitor visitor) {
+ visitor.visitLeaf(this);
+ }
+ }
+
+ public void testScorerSupplier() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+ w.addDocument(new Document());
+ DirectoryReader reader = DirectoryReader.open(w);
+ w.close();
+ IndexSearcher s = newSearcher(reader);
+ s.setQueryCache(null);
+ Weight weight = s.createWeight(s.rewrite(new DummyQuery()), randomFrom(ScoreMode.values()), 1f);
+ // exception when getting the scorer
+ expectThrows(UnsupportedOperationException.class, () -> weight.scorer(s.getIndexReader().leaves().get(0)));
+ // no exception, means scorerSupplier is delegated
+ weight.scorerSupplier(s.getIndexReader().leaves().get(0));
+ reader.close();
+ dir.close();
+ }
+
+ private static final QueryCachingPolicy ALWAYS_CACHE_POLICY = new QueryCachingPolicy() {
+
+ @Override
+ public void onUse(Query query) {}
+
+ @Override
+ public boolean shouldCache(Query query) throws IOException {
+ return true;
+ }
+
+ };
+}
diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java
new file mode 100644
index 0000000000000..83a0a63a6a5c8
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryPhaseTests.java
@@ -0,0 +1,1335 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.search.query;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.LatLonDocValuesField;
+import org.apache.lucene.document.LatLonPoint;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.DocValuesFieldExistsQuery;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.FilterCollector;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopFieldDocs;
+import org.apache.lucene.search.TotalHitCountCollector;
+import org.apache.lucene.search.TotalHits;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
+import org.apache.lucene.search.join.BitSetProducer;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.queries.spans.SpanNearQuery;
+import org.apache.lucene.queries.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.opensearch.action.search.SearchShardTask;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.mapper.DateFieldMapper;
+import org.opensearch.index.mapper.MappedFieldType;
+import org.opensearch.index.mapper.MapperService;
+import org.opensearch.index.mapper.NumberFieldMapper;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberType;
+import org.opensearch.index.query.ParsedQuery;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.IndexShardTestCase;
+import org.opensearch.lucene.queries.MinDocQuery;
+import org.opensearch.search.DocValueFormat;
+import org.opensearch.search.collapse.CollapseBuilder;
+import org.opensearch.search.internal.ContextIndexSearcher;
+import org.opensearch.search.internal.ScrollContext;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.search.sort.SortAndFormats;
+import org.opensearch.tasks.TaskCancelledException;
+import org.opensearch.test.TestSearchContext;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.spy;
+
+public class QueryPhaseTests extends IndexShardTestCase {
+
+ private IndexShard indexShard;
+ private final ExecutorService executor;
+ private final QueryPhaseSearcher queryPhaseSearcher;
+
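+    // Each parameter set runs the whole suite twice: concurrency 0 exercises the default sequential
+    // query phase searcher without an executor, while concurrency 5 exercises ConcurrentQueryPhaseSearcher
+    // backed by a fixed five-thread pool.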
+ @ParametersFactory
+    public static Collection<Object[]> concurrency() {
+ return Arrays.asList(
+ new Object[] { 0, QueryPhase.DEFAULT_QUERY_PHASE_SEARCHER },
+ new Object[] { 5, new ConcurrentQueryPhaseSearcher() }
+ );
+ }
+
+ public QueryPhaseTests(int concurrency, QueryPhaseSearcher queryPhaseSearcher) {
+ this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null;
+ this.queryPhaseSearcher = queryPhaseSearcher;
+ }
+
+ @Override
+ public Settings threadPoolSettings() {
+ return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.search.min_queue_size", 10).build();
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ indexShard = newShard(true);
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ closeShards(indexShard);
+
+ if (executor != null) {
+ ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
+ }
+ }
+
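+    // Runs the query phase for the given query with size 0 and compares the reported total hits against
+    // IndexSearcher#count. When shouldCollectSearch or shouldCollectCount is false, an early-terminating
+    // searcher is used that trips an assertion if documents keep being collected, i.e. the count is
+    // expected to come from index statistics rather than from visiting matching documents.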
+ private void countTestCase(Query query, IndexReader reader, boolean shouldCollectSearch, boolean shouldCollectCount) throws Exception {
+ ContextIndexSearcher searcher = shouldCollectSearch
+ ? newContextSearcher(reader, executor)
+ : newEarlyTerminationContextSearcher(reader, 0, executor);
+ TestSearchContext context = new TestSearchContext(null, indexShard, searcher);
+ context.parsedQuery(new ParsedQuery(query));
+ context.setSize(0);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ final boolean rescore = QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertFalse(rescore);
+
+ ContextIndexSearcher countSearcher = shouldCollectCount
+ ? newContextSearcher(reader, executor)
+ : newEarlyTerminationContextSearcher(reader, 0, executor);
+ assertEquals(countSearcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value);
+ }
+
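+    // Builds a small random index (optionally with deletions) and, for several query shapes, checks
+    // whether the query phase can report the total hit count without actually collecting documents
+    // (the two boolean flags passed to countTestCase encode that expectation for the search and the
+    // count passes respectively).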
+ private void countTestCase(boolean withDeletions) throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new SortedSetDocValuesField("foo", new BytesRef("bar")));
+ doc.add(new SortedSetDocValuesField("docValuesOnlyField", new BytesRef("bar")));
+ doc.add(new LatLonDocValuesField("latLonDVField", 1.0, 1.0));
+ doc.add(new LatLonPoint("latLonDVField", 1.0, 1.0));
+ }
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "baz", Store.NO));
+ doc.add(new SortedSetDocValuesField("foo", new BytesRef("baz")));
+ }
+ if (withDeletions && (rarely() || i == 0)) {
+ doc.add(new StringField("delete", "yes", Store.NO));
+ }
+ w.addDocument(doc);
+ }
+ if (withDeletions) {
+ w.deleteDocuments(new Term("delete", "yes"));
+ }
+ final IndexReader reader = w.getReader();
+ Query matchAll = new MatchAllDocsQuery();
+ Query matchAllCsq = new ConstantScoreQuery(matchAll);
+ Query tq = new TermQuery(new Term("foo", "bar"));
+ Query tCsq = new ConstantScoreQuery(tq);
+ Query dvfeq = new DocValuesFieldExistsQuery("foo");
+ Query dvfeq_points = new DocValuesFieldExistsQuery("latLonDVField");
+ Query dvfeqCsq = new ConstantScoreQuery(dvfeq);
+        // a field that has doc values but is not indexed still needs to collect
+ Query dvOnlyfeq = new DocValuesFieldExistsQuery("docValuesOnlyField");
+ BooleanQuery bq = new BooleanQuery.Builder().add(matchAll, Occur.SHOULD).add(tq, Occur.MUST).build();
+
+ countTestCase(matchAll, reader, false, false);
+ countTestCase(matchAllCsq, reader, false, false);
+ countTestCase(tq, reader, withDeletions, withDeletions);
+ countTestCase(tCsq, reader, withDeletions, withDeletions);
+ countTestCase(dvfeq, reader, withDeletions, true);
+ countTestCase(dvfeq_points, reader, withDeletions, true);
+ countTestCase(dvfeqCsq, reader, withDeletions, true);
+ countTestCase(dvOnlyfeq, reader, true, true);
+ countTestCase(bq, reader, true, true);
+ reader.close();
+ w.close();
+ dir.close();
+ }
+
+ public void testCountWithoutDeletions() throws Exception {
+ countTestCase(false);
+ }
+
+ public void testCountWithDeletions() throws Exception {
+ countTestCase(true);
+ }
+
+ public void testPostFilterDisablesCountOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+
+ TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+
+ context.setSearcher(newContextSearcher(reader, executor));
+ context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery()));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
+ reader.close();
+ dir.close();
+ }
+
+ public void testTerminateAfterWithFilter() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ for (int i = 0; i < 10; i++) {
+ doc.add(new StringField("foo", Integer.toString(i), Store.NO));
+ }
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.terminateAfter(1);
+ context.setSize(10);
+ for (int i = 0; i < 10; i++) {
+ context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i)))));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testMinScoreDisablesCountOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.setSize(0);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+
+ context.minimumScore(100);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation);
+ reader.close();
+ dir.close();
+ }
+
+ public void testQueryCapturesThreadPoolStats() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ w.addDocument(new Document());
+ }
+ w.close();
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ QuerySearchResult results = context.queryResult();
+ assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L));
+ assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0));
+ reader.close();
+ dir.close();
+ }
+
+ public void testInOrderScrollOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ w.addDocument(new Document());
+ }
+ w.close();
+ IndexReader reader = DirectoryReader.open(dir);
+ ScrollContext scrollContext = new ScrollContext();
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor), scrollContext);
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ scrollContext.lastEmittedDoc = null;
+ scrollContext.maxScore = Float.NaN;
+ scrollContext.totalHits = null;
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ int size = randomIntBetween(2, 5);
+ context.setSize(size);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, size, executor));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.terminateAfter(), equalTo(size));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size));
+ reader.close();
+ dir.close();
+ }
+
+ public void testTerminateAfterEarlyTermination() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "bar", Store.NO));
+ }
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "baz", Store.NO));
+ }
+ doc.add(new NumericDocValuesField("rank", numDocs - i));
+ w.addDocument(doc);
+ }
+ w.close();
+ final IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+
+ context.terminateAfter(numDocs);
+ {
+ context.setSize(10);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor);
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertFalse(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10));
+ assertThat(manager.getTotalHits(), equalTo(numDocs));
+ }
+
+ context.terminateAfter(1);
+ {
+ context.setSize(1);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+
+ context.setSize(0);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+ }
+
+ {
+ context.setSize(1);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ }
+ {
+ context.setSize(1);
+ BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
+ .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
+ .build();
+ context.parsedQuery(new ParsedQuery(bq));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+
+ context.setSize(0);
+ context.parsedQuery(new ParsedQuery(bq));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+ }
+ {
+ context.setSize(1);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor, 1);
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(manager.getTotalHits(), equalTo(1));
+ context.queryCollectorManagers().clear();
+ }
+ {
+ context.setSize(0);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor, 1);
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+ assertThat(manager.getTotalHits(), equalTo(1));
+ }
+
+ // tests with trackTotalHits and terminateAfter
+ context.terminateAfter(10);
+ context.setSize(0);
+ for (int trackTotalHits : new int[] { -1, 3, 76, 100 }) {
+ context.trackTotalHitsUpTo(trackTotalHits);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor);
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ if (trackTotalHits == -1) {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L));
+ } else {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) Math.min(trackTotalHits, 10)));
+ }
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+            // Concurrent search terminates the collection once each concurrent collector reaches the number of
+            // hits, so in general the number of results is multiplied by the number of slices (the unit of
+            // concurrency). Addressing that would require shared global state, much as HitsThresholdChecker does.
+ if (executor == null) {
+ assertThat(manager.getTotalHits(), equalTo(10));
+ }
+ }
+
+ context.terminateAfter(7);
+ context.setSize(10);
+ for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) {
+ context.trackTotalHitsUpTo(trackTotalHits);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ if (trackTotalHits == -1) {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L));
+ } else {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(7L));
+ }
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7));
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testIndexSortingEarlyTermination() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "bar", Store.NO));
+ }
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "baz", Store.NO));
+ }
+ doc.add(new NumericDocValuesField("rank", numDocs - i));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ final IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.setSize(1);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
+ assertThat(fieldDoc.fields[0], equalTo(1));
+
+ {
+ context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1)));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ context.parsedPostFilter(null);
+
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(executor, sort);
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+            // When searching concurrently, each executor short-circuits once "size" is reached,
+            // including the total hits collector
+ assertThat(manager.getTotalHits(), lessThanOrEqualTo(numDocs));
+
+ context.queryCollectorManagers().clear();
+ }
+
+ {
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, 1, executor));
+ context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testIndexSortScrollOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ doc.add(new NumericDocValuesField("rank", random().nextInt()));
+ doc.add(new NumericDocValuesField("tiebreaker", i));
+ w.addDocument(doc);
+ }
+ if (randomBoolean()) {
+ w.forceMerge(randomIntBetween(1, 10));
+ }
+ w.close();
+
+ final IndexReader reader = DirectoryReader.open(dir);
+        List<SortAndFormats> searchSortAndFormats = new ArrayList<>();
+ searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }));
+ // search sort is a prefix of the index sort
+ searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW }));
+ for (SortAndFormats searchSortAndFormat : searchSortAndFormats) {
+ ScrollContext scrollContext = new ScrollContext();
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor), scrollContext);
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ scrollContext.lastEmittedDoc = null;
+ scrollContext.maxScore = Float.NaN;
+ scrollContext.totalHits = null;
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(10);
+ context.sort(searchSortAndFormat);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1;
+ FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1];
+
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, 10, executor));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
+ for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) {
+ @SuppressWarnings("unchecked")
+                FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator(
+ i,
+ false
+ );
+ int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]);
+ if (cmp == 0) {
+ continue;
+ }
+ assertThat(cmp, equalTo(1));
+ break;
+ }
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testDisableTopScoreCollection() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer());
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ final int numDocs = 2 * scaledRandomIntBetween(50, 450);
+ for (int i = 0; i < numDocs; i++) {
+ doc.clear();
+ if (i % 2 == 0) {
+ doc.add(new TextField("title", "foo bar", Store.NO));
+ } else {
+ doc.add(new TextField("title", "foo", Store.NO));
+ }
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo")))
+ .addClause(new SpanTermQuery(new Term("title", "bar")))
+ .build();
+
+ context.parsedQuery(new ParsedQuery(q));
+ context.setSize(3);
+ context.trackTotalHitsUpTo(3);
+ TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
+ assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
+
+ context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }));
+ topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
+ assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
+ assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testEnhanceSortOnNumeric() throws Exception {
+ final String fieldNameLong = "long-field";
+ final String fieldNameDate = "date-field";
+ MappedFieldType fieldTypeLong = new NumberFieldMapper.NumberFieldType(fieldNameLong, NumberFieldMapper.NumberType.LONG);
+ MappedFieldType fieldTypeDate = new DateFieldMapper.DateFieldType(fieldNameDate);
+ MapperService mapperService = mock(MapperService.class);
+ when(mapperService.fieldType(fieldNameLong)).thenReturn(fieldTypeLong);
+ when(mapperService.fieldType(fieldNameDate)).thenReturn(fieldTypeDate);
+ // enough docs to have a tree with several leaf nodes
+ final int numDocs = 3500 * 5;
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null));
+ long firstValue = randomLongBetween(-10000000L, 10000000L);
+ long longValue = firstValue;
+ long dateValue = randomLongBetween(0, 3000000000000L);
+ for (int i = 1; i <= numDocs; ++i) {
+ Document doc = new Document();
+
+ doc.add(new LongPoint(fieldNameLong, longValue));
+ doc.add(new NumericDocValuesField(fieldNameLong, longValue));
+
+ doc.add(new LongPoint(fieldNameDate, dateValue));
+ doc.add(new NumericDocValuesField(fieldNameDate, dateValue));
+ writer.addDocument(doc);
+ longValue++;
+ dateValue++;
+ if (i % 3500 == 0) writer.commit();
+ }
+ writer.close();
+ final IndexReader reader = DirectoryReader.open(dir);
+ final SortField sortFieldLong = new SortField(fieldNameLong, SortField.Type.LONG);
+ sortFieldLong.setMissingValue(Long.MAX_VALUE);
+ final SortField sortFieldDate = new SortField(fieldNameDate, SortField.Type.LONG);
+ sortFieldDate.setMissingValue(Long.MAX_VALUE);
+ DocValueFormat dateFormat = fieldTypeDate.docValueFormat(null, null);
+ final Sort longSort = new Sort(sortFieldLong);
+ final Sort longDateSort = new Sort(sortFieldLong, sortFieldDate);
+ final Sort dateSort = new Sort(sortFieldDate);
+ final Sort dateLongSort = new Sort(sortFieldDate, sortFieldLong);
+ SortAndFormats longSortAndFormats = new SortAndFormats(longSort, new DocValueFormat[] { DocValueFormat.RAW });
+ SortAndFormats longDateSortAndFormats = new SortAndFormats(longDateSort, new DocValueFormat[] { DocValueFormat.RAW, dateFormat });
+ SortAndFormats dateSortAndFormats = new SortAndFormats(dateSort, new DocValueFormat[] { dateFormat });
+ SortAndFormats dateLongSortAndFormats = new SortAndFormats(dateLongSort, new DocValueFormat[] { dateFormat, DocValueFormat.RAW });
+ ParsedQuery query = new ParsedQuery(new MatchAllDocsQuery());
+ SearchShardTask task = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap());
+
+ // 1. Test a sort on long field
+ {
+ TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)));
+ when(searchContext.mapperService()).thenReturn(mapperService);
+ searchContext.sort(longSortAndFormats);
+ searchContext.parsedQuery(query);
+ searchContext.setTask(task);
+ searchContext.setSize(10);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher);
+ assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false);
+ }
+
+ // 2. Test a sort on long field + date field
+ {
+ TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)));
+ when(searchContext.mapperService()).thenReturn(mapperService);
+ searchContext.sort(longDateSortAndFormats);
+ searchContext.parsedQuery(query);
+ searchContext.setTask(task);
+ searchContext.setSize(10);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher);
+ assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true);
+ }
+
+ // 3. Test a sort on date field
+ {
+ TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)));
+ when(searchContext.mapperService()).thenReturn(mapperService);
+ searchContext.sort(dateSortAndFormats);
+ searchContext.parsedQuery(query);
+ searchContext.setTask(task);
+ searchContext.setSize(10);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher);
+ assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false);
+ }
+
+ // 4. Test a sort on date field + long field
+ {
+ TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)));
+ when(searchContext.mapperService()).thenReturn(mapperService);
+ searchContext.sort(dateLongSortAndFormats);
+ searchContext.parsedQuery(query);
+ searchContext.setTask(task);
+ searchContext.setSize(10);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher);
+ assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true);
+ }
+
+ // 5. Test that sort optimization is run when from > 0 and size = 0
+ {
+ TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)));
+ when(searchContext.mapperService()).thenReturn(mapperService);
+ searchContext.sort(longSortAndFormats);
+ searchContext.parsedQuery(query);
+ searchContext.setTask(task);
+ searchContext.from(5);
+ searchContext.setSize(0);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher);
+ assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false);
+ }
+
+        // 6. Test that sort optimization works with from = 0 and size = 0
+ {
+ TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)));
+ when(searchContext.mapperService()).thenReturn(mapperService);
+ searchContext.sort(longSortAndFormats);
+ searchContext.parsedQuery(query);
+ searchContext.setTask(task);
+ searchContext.setSize(0);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher);
+ }
+
+ // 7. Test that sort optimization works with search after
+ {
+ TestSearchContext searchContext = spy(new TestSearchContext(null, indexShard, newContextSearcher(reader, executor)));
+ when(searchContext.mapperService()).thenReturn(mapperService);
+ int afterDocument = (int) randomLongBetween(0, 50);
+ long afterValue = firstValue + afterDocument;
+ FieldDoc after = new FieldDoc(afterDocument, Float.NaN, new Long[] { afterValue });
+ searchContext.searchAfter(after);
+ searchContext.sort(longSortAndFormats);
+ searchContext.parsedQuery(query);
+ searchContext.setTask(task);
+ searchContext.setSize(10);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult(), queryPhaseSearcher);
+ final TopDocs topDocs = searchContext.queryResult().topDocs().topDocs;
+ long topValue = (long) ((FieldDoc) topDocs.scoreDocs[0]).fields[0];
+ assertThat(topValue, greaterThan(afterValue));
+ assertSortResults(topDocs, (long) numDocs, false);
+
+ final TotalHits totalHits = topDocs.totalHits;
+ assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation);
+ assertEquals(numDocs, totalHits.value);
+ }
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testMaxScoreQueryVisitor() {
+ BitSetProducer producer = context -> new FixedBitSet(1);
+ Query query = new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested");
+ assertTrue(hasInfMaxScore(query));
+
+ query = new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.None, "nested");
+ assertFalse(hasInfMaxScore(query));
+
+ for (Occur occur : Occur.values()) {
+ query = new BooleanQuery.Builder().add(
+ new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"),
+ occur
+ ).build();
+ if (occur == Occur.MUST) {
+ assertTrue(hasInfMaxScore(query));
+ } else {
+ assertFalse(hasInfMaxScore(query));
+ }
+
+ query = new BooleanQuery.Builder().add(
+ new BooleanQuery.Builder().add(
+ new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"),
+ occur
+ ).build(),
+ occur
+ ).build();
+ if (occur == Occur.MUST) {
+ assertTrue(hasInfMaxScore(query));
+ } else {
+ assertFalse(hasInfMaxScore(query));
+ }
+
+ query = new BooleanQuery.Builder().add(
+ new BooleanQuery.Builder().add(
+ new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"),
+ occur
+ ).build(),
+ Occur.FILTER
+ ).build();
+ assertFalse(hasInfMaxScore(query));
+
+ query = new BooleanQuery.Builder().add(
+ new BooleanQuery.Builder().add(new SpanTermQuery(new Term("field", "foo")), occur)
+ .add(new OpenSearchToParentBlockJoinQuery(new MatchAllDocsQuery(), producer, ScoreMode.Avg, "nested"), occur)
+ .build(),
+ occur
+ ).build();
+ if (occur == Occur.MUST) {
+ assertTrue(hasInfMaxScore(query));
+ } else {
+ assertFalse(hasInfMaxScore(query));
+ }
+ }
+ }
+
+ // assert score docs are in order and their number is as expected
+ private void assertSortResults(TopDocs topDocs, long expectedNumDocs, boolean isDoubleSort) {
+ if (topDocs.totalHits.relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) {
+ assertThat(topDocs.totalHits.value, lessThanOrEqualTo(expectedNumDocs));
+ } else {
+ assertEquals(topDocs.totalHits.value, expectedNumDocs);
+ }
+ long cur1, cur2;
+ long prev1 = Long.MIN_VALUE;
+ long prev2 = Long.MIN_VALUE;
+ for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
+ cur1 = (long) ((FieldDoc) scoreDoc).fields[0];
+ assertThat(cur1, greaterThanOrEqualTo(prev1)); // test that docs are properly sorted on the first sort
+ if (isDoubleSort) {
+ cur2 = (long) ((FieldDoc) scoreDoc).fields[1];
+ if (cur1 == prev1) {
+ assertThat(cur2, greaterThanOrEqualTo(prev2)); // test that docs are properly sorted on the secondary sort
+ }
+ prev2 = cur2;
+ }
+ prev1 = cur1;
+ }
+ }
+
+ public void testMinScore() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ for (int i = 0; i < 10; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new StringField("filter", "f1", Store.NO));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.parsedQuery(
+ new ParsedQuery(
+ new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD)
+ .build()
+ )
+ );
+ context.minimumScore(0.01f);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(1);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value);
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testMaxScore() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO));
+ doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? " " + Integer.toString(i) : ""))));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.trackScores(true);
+ context.parsedQuery(
+ new ParsedQuery(
+ new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD)
+ .build()
+ )
+ );
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(1);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+
+ context.trackScores(false);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testCollapseQuerySearchResults() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("user", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ // Always end up with uneven buckets so collapsing is predictable
+ final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1;
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new NumericDocValuesField("user", i & 1));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ QueryShardContext queryShardContext = mock(QueryShardContext.class);
+ when(queryShardContext.fieldMapper("user")).thenReturn(
+ new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap())
+ );
+
+ TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader, executor));
+ context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext()));
+ context.trackScores(true);
+ context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar"))));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(2);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ CollapseTopFieldDocs topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs;
+ assertThat(topDocs.collapseValues.length, equalTo(2));
+ assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0
+ assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs;
+ assertThat(topDocs.collapseValues.length, equalTo(2));
+ assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0
+ assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1
+
+ context.trackScores(false);
+ QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher);
+ assertTrue(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs;
+ assertThat(topDocs.collapseValues.length, equalTo(2));
+ assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0
+ assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testCancellationDuringPreprocess() throws IOException {
+ try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig())) {
+
+ for (int i = 0; i < 10; i++) {
+ Document doc = new Document();
+ StringBuilder sb = new StringBuilder();
+ for (int j = 0; j < i; j++) {
+ sb.append('a');
+ }
+ doc.add(new StringField("foo", sb.toString(), Store.NO));
+ w.addDocument(doc);
+ }
+ w.flush();
+ w.close();
+
+ try (IndexReader reader = DirectoryReader.open(dir)) {
+ TestSearchContext context = new TestSearchContextWithRewriteAndCancellation(
+ null,
+ indexShard,
+ newContextSearcher(reader, executor)
+ );
+ PrefixQuery prefixQuery = new PrefixQuery(new Term("foo", "a"));
+ prefixQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
+ context.parsedQuery(new ParsedQuery(prefixQuery));
+ SearchShardTask task = mock(SearchShardTask.class);
+ when(task.isCancelled()).thenReturn(true);
+ context.setTask(task);
+ expectThrows(TaskCancelledException.class, () -> new QueryPhase().preProcess(context));
+ }
+ }
+ }
+
+ private static class TestSearchContextWithRewriteAndCancellation extends TestSearchContext {
+
+ private TestSearchContextWithRewriteAndCancellation(
+ QueryShardContext queryShardContext,
+ IndexShard indexShard,
+ ContextIndexSearcher searcher
+ ) {
+ super(queryShardContext, indexShard, searcher);
+ }
+
+ @Override
+ public void preProcess(boolean rewrite) {
+ try {
+ searcher().rewrite(query());
+ } catch (IOException e) {
+ fail("IOException shouldn't be thrown");
+ }
+ }
+
+ @Override
+ public boolean lowLevelCancellation() {
+ return true;
+ }
+ }
+
+ private static ContextIndexSearcher newContextSearcher(IndexReader reader, ExecutorService executor) throws IOException {
+ return new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ IndexSearcher.getDefaultQueryCachingPolicy(),
+ true,
+ executor
+ );
+ }
+
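+    // Like newContextSearcher, but asserts that collection terminates after at most "size" documents per
+    // segment (see AssertingEarlyTerminationFilterCollector below).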
+ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexReader reader, int size, ExecutorService executor)
+ throws IOException {
+ return new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ IndexSearcher.getDefaultQueryCachingPolicy(),
+ true,
+ executor
+ ) {
+
+ @Override
+            public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+ final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size);
+ super.search(leaves, weight, in);
+ }
+ };
+ }
+
+    // used to check that the numeric (long or date) sort optimization was run
+ private static ContextIndexSearcher newOptimizedContextSearcher(IndexReader reader, int queryType, ExecutorService executor)
+ throws IOException {
+ return new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ IndexSearcher.getDefaultQueryCachingPolicy(),
+ true,
+ executor
+ ) {
+
+ @Override
+ public void search(
+ Query query,
+                CollectorManager<?, TopFieldDocs> manager,
+ QuerySearchResult result,
+ DocValueFormat[] formats,
+ TotalHits totalHits
+ ) throws IOException {
+ assertTrue(query instanceof BooleanQuery);
+                List<BooleanClause> clauses = ((BooleanQuery) query).clauses();
+ assertTrue(clauses.size() == 2);
+ assertTrue(clauses.get(0).getOccur() == Occur.FILTER);
+ assertTrue(clauses.get(1).getOccur() == Occur.SHOULD);
+ if (queryType == 0) {
+ assertTrue(
+ clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass()
+ );
+ }
+ if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery);
+ super.search(query, manager, result, formats, totalHits);
+ }
+
+ @Override
+ public void search(
+                List<LeafReaderContext> leaves,
+ Weight weight,
+ @SuppressWarnings("rawtypes") CollectorManager manager,
+ QuerySearchResult result,
+ DocValueFormat[] formats,
+ TotalHits totalHits
+ ) throws IOException {
+ final Query query = weight.getQuery();
+ assertTrue(query instanceof BooleanQuery);
+                List<BooleanClause> clauses = ((BooleanQuery) query).clauses();
+ assertTrue(clauses.size() == 2);
+ assertTrue(clauses.get(0).getOccur() == Occur.FILTER);
+ assertTrue(clauses.get(1).getOccur() == Occur.SHOULD);
+ if (queryType == 0) {
+ assertTrue(
+ clauses.get(1).getQuery().getClass() == LongPoint.newDistanceFeatureQuery("random_field", 1, 1, 1).getClass()
+ );
+ }
+ if (queryType == 1) assertTrue(clauses.get(1).getQuery() instanceof DocValuesFieldExistsQuery);
+ super.search(leaves, weight, manager, result, formats, totalHits);
+ }
+
+ @Override
+            public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+ if (getExecutor() == null) {
+                    assert (false); // should not get here, the sequential path is expected to search with a CollectorManager
+ } else {
+ super.search(leaves, weight, collector);
+ }
+ }
+ };
+ }
+
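+    // Tracks the total hit count across all collectors created for a search. In the concurrent case each
+    // slice gets its own TotalHitCountCollector and reduce() sums their counts; when terminateAfter is set,
+    // reduce() asserts at least that many hits were seen and caps the reported total at terminateAfter.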
+ private static class TestTotalHitCountCollectorManager extends TotalHitCountCollectorManager {
+ private int totalHits;
+ private final TotalHitCountCollector collector;
+        private final Integer terminateAfter;
+
+ static TestTotalHitCountCollectorManager create(final ExecutorService executor) {
+ return create(executor, null, null);
+ }
+
+        static TestTotalHitCountCollectorManager create(final ExecutorService executor, final Integer terminateAfter) {
+            return create(executor, null, terminateAfter);
+ }
+
+ static TestTotalHitCountCollectorManager create(final ExecutorService executor, final Sort sort) {
+ return create(executor, sort, null);
+ }
+
+        static TestTotalHitCountCollectorManager create(final ExecutorService executor, final Sort sort, final Integer terminateAfter) {
+ if (executor == null) {
+ return new TestTotalHitCountCollectorManager(new TotalHitCountCollector(), sort);
+ } else {
+                return new TestTotalHitCountCollectorManager(sort, terminateAfter);
+ }
+ }
+
+ private TestTotalHitCountCollectorManager(final TotalHitCountCollector collector, final Sort sort) {
+ super(sort);
+ this.collector = collector;
+            this.terminateAfter = null;
+ }
+
+        private TestTotalHitCountCollectorManager(final Sort sort, final Integer terminateAfter) {
+ super(sort);
+ this.collector = null;
+            this.terminateAfter = terminateAfter;
+ }
+
+ @Override
+ public TotalHitCountCollector newCollector() throws IOException {
+ return (collector == null) ? super.newCollector() : collector;
+ }
+
+ @Override
+        public ReduceableSearchResult reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
+ final ReduceableSearchResult result = super.reduce(collectors);
+ totalHits = collectors.stream().mapToInt(TotalHitCountCollector::getTotalHits).sum();
+
+            if (terminateAfter != null) {
+                assertThat(totalHits, greaterThanOrEqualTo(terminateAfter));
+                totalHits = Math.min(totalHits, terminateAfter);
+ }
+
+ return result;
+ }
+
+ public int getTotalHits() {
+ return (collector == null) ? totalHits : collector.getTotalHits();
+ }
+ }
+
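+ // FilterCollector that asserts no more than `size` documents are collected per segment, used to
+ // verify that early termination actually stops collection.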
+ private static class AssertingEarlyTerminationFilterCollector extends FilterCollector {
+ private final int size;
+
+ AssertingEarlyTerminationFilterCollector(Collector in, int size) {
+ super(in);
+ this.size = size;
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ final LeafCollector in = super.getLeafCollector(context);
+ return new FilterLeafCollector(in) {
+ int collected;
+
+ @Override
+ public void collect(int doc) throws IOException {
+ assert collected <= size : "should not collect more than " + size + " doc per segment, got " + collected;
+ ++collected;
+ super.collect(doc);
+ }
+ };
+ }
+ }
+}
diff --git a/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
new file mode 100644
index 0000000000000..d2cb77f529793
--- /dev/null
+++ b/sandbox/plugins/concurrent-search/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
@@ -0,0 +1,1182 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.spans.SpanNearQuery;
+import org.apache.lucene.queries.spans.SpanTermQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.FilterCollector;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TotalHits;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.opensearch.action.search.SearchShardTask;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.ToXContent;
+import org.opensearch.common.xcontent.XContentBuilder;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberType;
+import org.opensearch.index.query.ParsedQuery;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.IndexShardTestCase;
+import org.opensearch.lucene.queries.MinDocQuery;
+import org.opensearch.search.DocValueFormat;
+import org.opensearch.search.collapse.CollapseBuilder;
+import org.opensearch.search.internal.ContextIndexSearcher;
+import org.opensearch.search.internal.ScrollContext;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.search.profile.ProfileResult;
+import org.opensearch.search.profile.ProfileShardResult;
+import org.opensearch.search.profile.SearchProfileShardResults;
+import org.opensearch.search.profile.query.CollectorResult;
+import org.opensearch.search.profile.query.QueryProfileShardResult;
+import org.opensearch.search.sort.SortAndFormats;
+import org.opensearch.test.TestSearchContext;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.hamcrest.Matchers.hasSize;
+
+public class QueryProfilePhaseTests extends IndexShardTestCase {
+
+ private IndexShard indexShard;
+ private final ExecutorService executor;
+ private final QueryPhaseSearcher queryPhaseSearcher;
+
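+ // Run every test twice: once with the default (sequential) query phase searcher and no executor,
+ // and once with a five-thread executor and the concurrent query phase searcher.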
+ @ParametersFactory
+ public static Collection<Object[]> concurrency() {
+ return Arrays.asList(
+ new Object[] { 0, QueryPhase.DEFAULT_QUERY_PHASE_SEARCHER },
+ new Object[] { 5, new ConcurrentQueryPhaseSearcher() }
+ );
+ }
+
+ public QueryProfilePhaseTests(int concurrency, QueryPhaseSearcher queryPhaseSearcher) {
+ this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null;
+ this.queryPhaseSearcher = queryPhaseSearcher;
+ }
+
+ @Override
+ public Settings threadPoolSettings() {
+ return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.search.min_queue_size", 10).build();
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ indexShard = newShard(true);
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ closeShards(indexShard);
+
+ if (executor != null) {
+ ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
+ }
+ }
+
+ public void testPostFilterDisablesCountOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+
+ TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.setSearcher(newContextSearcher(reader, executor));
+ context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery()));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, collector -> {
+ assertThat(collector.getReason(), equalTo("search_post_filter"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchNoDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testTerminateAfterWithFilter() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ for (int i = 0; i < 10; i++) {
+ doc.add(new StringField("foo", Integer.toString(i), Store.NO));
+ }
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.terminateAfter(1);
+ context.setSize(10);
+ for (int i = 0; i < 10; i++) {
+ context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i)))));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, collector -> {
+ assertThat(collector.getReason(), equalTo("search_post_filter"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren().get(0).getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ });
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testMinScoreDisablesCountOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0, executor));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.setSize(0);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.minimumScore(100);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation);
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_min_score"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testInOrderScrollOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ w.addDocument(new Document());
+ }
+ w.close();
+ IndexReader reader = DirectoryReader.open(dir);
+ ScrollContext scrollContext = new ScrollContext();
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor), scrollContext);
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ scrollContext.lastEmittedDoc = null;
+ scrollContext.maxScore = Float.NaN;
+ scrollContext.totalHits = null;
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ int size = randomIntBetween(2, 5);
+ context.setSize(size);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, size, executor));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.terminateAfter(), equalTo(size));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size));
+ assertProfileData(context, "ConstantScoreQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testTerminateAfterEarlyTermination() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "bar", Store.NO));
+ }
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "baz", Store.NO));
+ }
+ doc.add(new NumericDocValuesField("rank", numDocs - i));
+ w.addDocument(doc);
+ }
+ w.close();
+ final IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+
+ context.terminateAfter(1);
+ {
+ context.setSize(1);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ context.setSize(0);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+
+ {
+ context.setSize(1);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+ {
+ context.setSize(1);
+ BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
+ .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
+ .build();
+ context.parsedQuery(new ParsedQuery(bq));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ context.setSize(0);
+ context.parsedQuery(new ParsedQuery(bq));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), equalTo(0L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+
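+ // With terminate_after set, the reported total hit count depends on track_total_hits: disabled (-1)
+ // reports 0 hits, while any tracking threshold is capped at the terminate_after value (7 here).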
+ context.terminateAfter(7);
+ context.setSize(10);
+ for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) {
+ context.trackTotalHitsUpTo(trackTotalHits);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertTrue(context.queryResult().terminatedEarly());
+ if (trackTotalHits == -1) {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L));
+ } else {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(7L));
+ }
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(7L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testIndexSortingEarlyTermination() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "bar", Store.NO));
+ }
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "baz", Store.NO));
+ }
+ doc.add(new NumericDocValuesField("rank", numDocs - i));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ final IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.setSize(1);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
+ assertThat(fieldDoc.fields[0], equalTo(1));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ {
+ context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1)));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ assertProfileData(context, collector -> {
+ assertThat(collector.getReason(), equalTo("search_post_filter"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MinDocQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ });
+ context.parsedPostFilter(null);
+ }
+
+ {
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, 1, executor));
+ context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+ }
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testIndexSortScrollOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ doc.add(new NumericDocValuesField("rank", random().nextInt()));
+ doc.add(new NumericDocValuesField("tiebreaker", i));
+ w.addDocument(doc);
+ }
+ if (randomBoolean()) {
+ w.forceMerge(randomIntBetween(1, 10));
+ }
+ w.close();
+
+ final IndexReader reader = DirectoryReader.open(dir);
+ List<SortAndFormats> searchSortAndFormats = new ArrayList<>();
+ searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }));
+ // search sort is a prefix of the index sort
+ searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW }));
+ for (SortAndFormats searchSortAndFormat : searchSortAndFormats) {
+ ScrollContext scrollContext = new ScrollContext();
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor), scrollContext);
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ scrollContext.lastEmittedDoc = null;
+ scrollContext.maxScore = Float.NaN;
+ scrollContext.totalHits = null;
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(10);
+ context.sort(searchSortAndFormat);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1;
+ FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1];
+
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, 10, executor));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertProfileData(context, "ConstantScoreQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(1));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("SearchAfterSortedDocQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+ FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
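+ // After the first page the scroll is rewritten with SearchAfterSortedDocQuery (asserted above), so
+ // the first hit of this page must sort at or after the last hit of the previous page.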
+ for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) {
+ @SuppressWarnings("unchecked")
+ FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator(i, true);
+ int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]);
+ if (cmp == 0) {
+ continue;
+ }
+ assertThat(cmp, equalTo(1));
+ break;
+ }
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testDisableTopScoreCollection() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer());
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ final int numDocs = 2 * scaledRandomIntBetween(50, 450);
+ for (int i = 0; i < numDocs; i++) {
+ doc.clear();
+ if (i % 2 == 0) {
+ doc.add(new TextField("title", "foo bar", Store.NO));
+ } else {
+ doc.add(new TextField("title", "foo", Store.NO));
+ }
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo")))
+ .addClause(new SpanTermQuery(new Term("title", "bar")))
+ .build();
+
+ context.parsedQuery(new ParsedQuery(q));
+ context.setSize(3);
+ context.trackTotalHitsUpTo(3);
+ TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
+ assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
+ assertProfileData(context, "SpanNearQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }));
+ topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
+ assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
+ assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+ assertProfileData(context, "SpanNearQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testMinScore() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ for (int i = 0; i < 10; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new StringField("filter", "f1", Store.NO));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.parsedQuery(
+ new ParsedQuery(
+ new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD)
+ .build()
+ )
+ );
+ context.minimumScore(0.01f);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(1);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(10L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_min_score"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testMaxScore() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO));
+ doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? " " + Integer.toString(i) : ""))));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader, executor));
+ context.trackScores(true);
+ context.parsedQuery(
+ new ParsedQuery(
+ new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD)
+ .build()
+ )
+ );
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(1);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testCollapseQuerySearchResults() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("user", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ // Always end up with uneven buckets so collapsing is predictable
+ final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1;
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new NumericDocValuesField("user", i & 1));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ QueryShardContext queryShardContext = mock(QueryShardContext.class);
+ when(queryShardContext.fieldMapper("user")).thenReturn(
+ new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap())
+ );
+
+ TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader, executor));
+ context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext()));
+ context.trackScores(true);
+ context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar"))));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(2);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ assertProfileData(context, "TermQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren(), empty());
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers(), queryPhaseSearcher);
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ assertProfileData(context, "TermQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren(), empty());
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ private void assertProfileData(SearchContext context, String type, Consumer<ProfileResult> query, Consumer<CollectorResult> collector)
+ throws IOException {
+ assertProfileData(context, collector, (profileResult) -> {
+ assertThat(profileResult.getQueryName(), equalTo(type));
+ assertThat(profileResult.getTime(), greaterThan(0L));
+ query.accept(profileResult);
+ });
+ }
+
+ private void assertProfileData(SearchContext context, Consumer<CollectorResult> collector, Consumer<ProfileResult> query1)
+ throws IOException {
+ assertProfileData(context, Arrays.asList(query1), collector, false);
+ }
+
+ private void assertProfileData(
+ SearchContext context,
+ Consumer<CollectorResult> collector,
+ Consumer<ProfileResult> query1,
+ Consumer<ProfileResult> query2
+ ) throws IOException {
+ assertProfileData(context, Arrays.asList(query1, query2), collector, false);
+ }
+
+ private final void assertProfileData(
+ SearchContext context,
+ List<Consumer<ProfileResult>> queries,
+ Consumer<CollectorResult> collector,
+ boolean debug
+ ) throws IOException {
+ assertThat(context.getProfilers(), not(nullValue()));
+
+ final ProfileShardResult result = SearchProfileShardResults.buildShardResults(context.getProfilers(), null);
+ if (debug) {
+ final SearchProfileShardResults results = new SearchProfileShardResults(
+ Collections.singletonMap(indexShard.shardId().toString(), result)
+ );
+
+ try (final XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint()) {
+ builder.startObject();
+ results.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.flush();
+
+ final OutputStream out = builder.getOutputStream();
+ assertThat(out, instanceOf(ByteArrayOutputStream.class));
+
+ logger.info(new String(((ByteArrayOutputStream) out).toByteArray(), StandardCharsets.UTF_8));
+ }
+ }
+
+ assertThat(result.getQueryProfileResults(), hasSize(1));
+
+ final QueryProfileShardResult queryProfileShardResult = result.getQueryProfileResults().get(0);
+ assertThat(queryProfileShardResult.getQueryResults(), hasSize(queries.size()));
+
+ for (int i = 0; i < queries.size(); ++i) {
+ queries.get(i).accept(queryProfileShardResult.getQueryResults().get(i));
+ }
+
+ collector.accept(queryProfileShardResult.getCollectorResult());
+ }
+
+ private static ContextIndexSearcher newContextSearcher(IndexReader reader, ExecutorService executor) throws IOException {
+ return new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ IndexSearcher.getDefaultQueryCachingPolicy(),
+ true,
+ executor
+ );
+ }
+
+ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexReader reader, int size, ExecutorService executor)
+ throws IOException {
+ return new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ IndexSearcher.getDefaultQueryCachingPolicy(),
+ true,
+ executor
+ ) {
+
+ @Override
+ public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+ final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size);
+ super.search(leaves, weight, in);
+ }
+ };
+ }
+
+ private static class AssertingEarlyTerminationFilterCollector extends FilterCollector {
+ private final int size;
+
+ AssertingEarlyTerminationFilterCollector(Collector in, int size) {
+ super(in);
+ this.size = size;
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ final LeafCollector in = super.getLeafCollector(context);
+ return new FilterLeafCollector(in) {
+ int collected;
+
+ @Override
+ public void collect(int doc) throws IOException {
+ assert collected <= size : "should not collect more than " + size + " doc per segment, got " + collected;
+ ++collected;
+ super.collect(doc);
+ }
+ };
+ }
+ }
+}
diff --git a/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java b/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java
index 81c98c862d2b2..a883e111f7c95 100644
--- a/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java
+++ b/server/src/main/java/org/opensearch/common/lucene/MinimumScoreCollector.java
@@ -55,6 +55,10 @@ public MinimumScoreCollector(Collector collector, float minimumScore) {
this.minimumScore = minimumScore;
}
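+ /** Returns the collector wrapped by this minimum-score collector, so collector managers can unwrap it during reduce. */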
+ public Collector getCollector() {
+ return collector;
+ }
+
@Override
public void setScorer(Scorable scorer) throws IOException {
if (!(scorer instanceof ScoreCachingWrappingScorer)) {
diff --git a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java
index 331b67a40878f..2dcb0578fd23d 100644
--- a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java
+++ b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java
@@ -53,6 +53,10 @@ public FilteredCollector(Collector collector, Weight filter) {
this.filter = filter;
}
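+ /** Returns the collector wrapped by this filtered collector, so collector managers can unwrap it during reduce. */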
+ public Collector getCollector() {
+ return collector;
+ }
+
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
final ScorerSupplier filterScorerSupplier = filter.scorerSupplier(context);
diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
index bfe8eed05ea9b..6fd78b834344d 100644
--- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
+++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java
@@ -36,6 +36,7 @@
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
@@ -82,6 +83,7 @@
import org.opensearch.search.profile.Profilers;
import org.opensearch.search.query.QueryPhaseExecutionException;
import org.opensearch.search.query.QuerySearchResult;
+import org.opensearch.search.query.ReduceableSearchResult;
import org.opensearch.search.rescore.RescoreContext;
import org.opensearch.search.slice.SliceBuilder;
import org.opensearch.search.sort.SortAndFormats;
@@ -163,7 +165,7 @@ final class DefaultSearchContext extends SearchContext {
private Profilers profilers;
    private final Map<String, SearchExtBuilder> searchExtBuilders = new HashMap<>();
- private final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
+ private final Map<Class<?>, CollectorManager<? extends Collector, ReduceableSearchResult>> queryCollectorManagers = new HashMap<>();
private final QueryShardContext queryShardContext;
private final FetchPhase fetchPhase;
@@ -823,8 +825,8 @@ public long getRelativeTimeInMillis() {
}
@Override
- public Map<Class<?>, Collector> queryCollectors() {
- return queryCollectors;
+ public Map<Class<?>, CollectorManager<? extends Collector, ReduceableSearchResult>> queryCollectorManagers() {
+ return queryCollectorManagers;
}
@Override
diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java
index be62b33adb356..5a837a6e14c5a 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java
@@ -32,6 +32,7 @@
package org.opensearch.search.aggregations;
import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.Query;
import org.opensearch.common.inject.Inject;
import org.opensearch.common.lucene.search.Queries;
@@ -40,9 +41,11 @@
import org.opensearch.search.profile.query.CollectorResult;
import org.opensearch.search.profile.query.InternalProfileCollector;
import org.opensearch.search.query.QueryPhaseExecutionException;
+import org.opensearch.search.query.ReduceableSearchResult;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.List;
@@ -68,17 +71,18 @@ public void preProcess(SearchContext context) {
}
context.aggregations().aggregators(aggregators);
if (!collectors.isEmpty()) {
- Collector collector = MultiBucketCollector.wrap(collectors);
- ((BucketCollector) collector).preCollection();
- if (context.getProfilers() != null) {
- collector = new InternalProfileCollector(
- collector,
- CollectorResult.REASON_AGGREGATION,
- // TODO: report on child aggs as well
- Collections.emptyList()
- );
- }
- context.queryCollectors().put(AggregationPhase.class, collector);
+ final Collector collector = createCollector(context, collectors);
+ context.queryCollectorManagers().put(AggregationPhase.class, new CollectorManager<Collector, ReduceableSearchResult>() {
+ @Override
+ public Collector newCollector() throws IOException {
+ return collector;
+ }
+
+ @Override
+ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
+ throw new UnsupportedOperationException("The concurrent aggregation over index segments is not supported");
+ }
+ });
}
} catch (IOException e) {
throw new AggregationInitializationException("Could not initialize aggregators", e);
@@ -147,6 +151,20 @@ public void execute(SearchContext context) {
// disable aggregations so that they don't run on next pages in case of scrolling
context.aggregations(null);
- context.queryCollectors().remove(AggregationPhase.class);
+ context.queryCollectorManagers().remove(AggregationPhase.class);
+ }
+
+ private Collector createCollector(SearchContext context, List<Aggregator> collectors) throws IOException {
+ Collector collector = MultiBucketCollector.wrap(collectors);
+ ((BucketCollector) collector).preCollection();
+ if (context.getProfilers() != null) {
+ collector = new InternalProfileCollector(
+ collector,
+ CollectorResult.REASON_AGGREGATION,
+ // TODO: report on child aggs as well
+ Collections.emptyList()
+ );
+ }
+ return collector;
}
}
diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java
index 2cc15d4c65b96..2fb5ababe19ad 100644
--- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java
+++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java
@@ -96,16 +96,6 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
private QueryProfiler profiler;
private MutableQueryTimeout cancellable;
- public ContextIndexSearcher(
- IndexReader reader,
- Similarity similarity,
- QueryCache queryCache,
- QueryCachingPolicy queryCachingPolicy,
- boolean wrapWithExitableDirectoryReader
- ) throws IOException {
- this(reader, similarity, queryCache, queryCachingPolicy, new MutableQueryTimeout(), wrapWithExitableDirectoryReader, null);
- }
-
public ContextIndexSearcher(
IndexReader reader,
Similarity similarity,
@@ -233,6 +223,25 @@ public void search(
result.topDocs(new TopDocsAndMaxScore(mergedTopDocs, Float.NaN), formats);
}
+ public void search(
+ Query query,
+ CollectorManager<?, TopFieldDocs> manager,
+ QuerySearchResult result,
+ DocValueFormat[] formats,
+ TotalHits totalHits
+ ) throws IOException {
+ TopFieldDocs mergedTopDocs = search(query, manager);
+ // Lucene sets shards indexes during merging of topDocs from different collectors
+ // We need to reset shard index; OpenSearch will set shard index later during reduce stage
+ for (ScoreDoc scoreDoc : mergedTopDocs.scoreDocs) {
+ scoreDoc.shardIndex = -1;
+ }
+ if (totalHits != null) { // we have already precalculated totalHits for the whole index
+ mergedTopDocs = new TopFieldDocs(totalHits, mergedTopDocs.scoreDocs, mergedTopDocs.fields);
+ }
+ result.topDocs(new TopDocsAndMaxScore(mergedTopDocs, Float.NaN), formats);
+ }
+
@Override
    protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
for (LeafReaderContext ctx : leaves) { // search each subreader
@@ -420,8 +429,4 @@ public void clear() {
runnables.clear();
}
}
-
- public boolean allowConcurrentSegmentSearch() {
- return (getExecutor() != null);
- }
}
diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java
index 6d77558ec3bd0..961d45b0011ef 100644
--- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java
+++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java
@@ -33,6 +33,7 @@
package org.opensearch.search.internal;
import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Query;
import org.opensearch.action.search.SearchShardTask;
@@ -61,6 +62,7 @@
import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext;
import org.opensearch.search.profile.Profilers;
import org.opensearch.search.query.QuerySearchResult;
+import org.opensearch.search.query.ReduceableSearchResult;
import org.opensearch.search.rescore.RescoreContext;
import org.opensearch.search.sort.SortAndFormats;
import org.opensearch.search.suggest.SuggestionSearchContext;
@@ -492,8 +494,8 @@ public Profilers getProfilers() {
}
@Override
- public Map<Class<?>, Collector> queryCollectors() {
- return in.queryCollectors();
+ public Map<Class<?>, CollectorManager<? extends Collector, ReduceableSearchResult>> queryCollectorManagers() {
+ return in.queryCollectorManagers();
}
@Override
diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java
index 7ff0eaed4be63..0c24fbee76335 100644
--- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java
+++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java
@@ -32,6 +32,7 @@
package org.opensearch.search.internal;
import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Query;
import org.opensearch.action.search.SearchShardTask;
@@ -66,6 +67,7 @@
import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext;
import org.opensearch.search.profile.Profilers;
import org.opensearch.search.query.QuerySearchResult;
+import org.opensearch.search.query.ReduceableSearchResult;
import org.opensearch.search.rescore.RescoreContext;
import org.opensearch.search.sort.SortAndFormats;
import org.opensearch.search.suggest.SuggestionSearchContext;
@@ -388,8 +390,8 @@ public final boolean hasOnlySuggest() {
*/
public abstract long getRelativeTimeInMillis();
- /** Return a view of the additional query collectors that should be run for this context. */
- public abstract Map<Class<?>, Collector> queryCollectors();
+ /** Return a view of the additional query collector managers that should be run for this context. */
+ public abstract Map<Class<?>, CollectorManager<? extends Collector, ReduceableSearchResult>> queryCollectorManagers();
public abstract QueryShardContext getQueryShardContext();
diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java
index 6b9be0167b50f..3cc9b1710d420 100644
--- a/server/src/main/java/org/opensearch/search/profile/Profilers.java
+++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java
@@ -57,7 +57,7 @@ public Profilers(ContextIndexSearcher searcher) {
/** Switch to a new profile. */
public QueryProfiler addQueryProfiler() {
- QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch());
+ QueryProfiler profiler = new QueryProfiler(searcher.getExecutor() != null);
searcher.setProfiler(profiler);
queryProfilers.add(profiler);
return profiler;
diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java
new file mode 100644
index 0000000000000..a35c22a6a2457
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java
@@ -0,0 +1,89 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+import org.opensearch.search.query.EarlyTerminatingListener;
+import org.opensearch.search.query.ReduceableSearchResult;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
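+/**
+ * A collector manager that wraps another {@link CollectorManager} and profiles the collectors it creates,
+ * playing the same role for collector managers that {@link InternalProfileCollector} plays for collectors.
+ */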
+public class InternalProfileCollectorManager
+ implements
+ ProfileCollectorManager<InternalProfileCollector, ReduceableSearchResult>,
+ EarlyTerminatingListener {
+ private final CollectorManager<? extends Collector, ReduceableSearchResult> manager;
+ private final String reason;
+ private final List<InternalProfileComponent> children;
+ private long time = 0;
+
+ public InternalProfileCollectorManager(
+ CollectorManager<? extends Collector, ReduceableSearchResult> manager,
+ String reason,
+ List<InternalProfileComponent> children
+ ) {
+ this.manager = manager;
+ this.reason = reason;
+ this.children = children;
+ }
+
+ @Override
+ public InternalProfileCollector newCollector() throws IOException {
+ return new InternalProfileCollector(manager.newCollector(), reason, children);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public ReduceableSearchResult reduce(Collection<InternalProfileCollector> collectors) throws IOException {
+ final Collection<Collector> subs = new ArrayList<>();
+
+ for (final InternalProfileCollector collector : collectors) {
+ subs.add(collector.getCollector());
+ time += collector.getTime();
+ }
+
+ return ((CollectorManager<Collector, ReduceableSearchResult>) manager).reduce(subs);
+ }
+
+ @Override
+ public String getReason() {
+ return reason;
+ }
+
+ @Override
+ public long getTime() {
+ return time;
+ }
+
+ @Override
+ public Collection<? extends InternalProfileComponent> children() {
+ return children;
+ }
+
+ @Override
+ public String getName() {
+ return manager.getClass().getSimpleName();
+ }
+
+ @Override
+ public CollectorResult getCollectorTree() {
+ return InternalProfileCollector.doGetCollectorTree(this);
+ }
+
+ @Override
+ public void onEarlyTermination(int maxCountHits, boolean forcedTermination) {
+ if (manager instanceof EarlyTerminatingListener) {
+ ((EarlyTerminatingListener) manager).onEarlyTermination(maxCountHits, forcedTermination);
+ }
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java b/server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java
new file mode 100644
index 0000000000000..7037988401fce
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/ProfileCollectorManager.java
@@ -0,0 +1,17 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+
+/**
+ * Collector manager which supports profiling
+ */
+public interface ProfileCollectorManager<C extends Collector, T> extends CollectorManager<C, T>, InternalProfileComponent {}
diff --git a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java
index 3ee8430522891..56cb49835dcc4 100644
--- a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java
+++ b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollector.java
@@ -95,6 +95,10 @@ public void collect(int doc) throws IOException {
};
}
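+ /** Returns the wrapped collector, used by {@link EarlyTerminatingCollectorManager} when reducing per-collector results. */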
+ Collector getCollector() {
+ return in;
+ }
+
/**
* Returns true if this collector has early terminated.
*/
diff --git a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java
new file mode 100644
index 0000000000000..32fbb24d16436
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingCollectorManager.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
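+/**
+ * A collector manager that wraps the collectors created by its delegate with {@link EarlyTerminatingCollector},
+ * so that each collector stops collecting once {@code maxCountHits} documents have been collected.
+ */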
+public class EarlyTerminatingCollectorManager<C extends Collector>
+ implements
+ CollectorManager<EarlyTerminatingCollector, ReduceableSearchResult>,
+ EarlyTerminatingListener {
+
+ private final CollectorManager<C, ReduceableSearchResult> manager;
+ private final int maxCountHits;
+ private boolean forceTermination;
+
+ EarlyTerminatingCollectorManager(CollectorManager<C, ReduceableSearchResult> manager, int maxCountHits, boolean forceTermination) {
+ this.manager = manager;
+ this.maxCountHits = maxCountHits;
+ this.forceTermination = forceTermination;
+ }
+
+ @Override
+ public EarlyTerminatingCollector newCollector() throws IOException {
+ return new EarlyTerminatingCollector(manager.newCollector(), maxCountHits, false /* forced termination is not supported */);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public ReduceableSearchResult reduce(Collection<EarlyTerminatingCollector> collectors) throws IOException {
+ final List<C> innerCollectors = new ArrayList<>(collectors.size());
+
+ boolean didTerminateEarly = false;
+ for (EarlyTerminatingCollector collector : collectors) {
+ innerCollectors.add((C) collector.getCollector());
+ if (collector.hasEarlyTerminated()) {
+ didTerminateEarly = true;
+ }
+ }
+
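+ // at least one collector terminated early, so mark the reduced result as terminated early as well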
+ if (didTerminateEarly) {
+ onEarlyTermination(maxCountHits, forceTermination);
+
+ final ReduceableSearchResult result = manager.reduce(innerCollectors);
+ return new ReduceableSearchResult() {
+ @Override
+ public void reduce(QuerySearchResult r) throws IOException {
+ result.reduce(r);
+ r.terminatedEarly(true);
+ }
+ };
+ }
+
+ return manager.reduce(innerCollectors);
+ }
+
+ @Override
+ public void onEarlyTermination(int maxCountHits, boolean forcedTermination) {
+ if (manager instanceof EarlyTerminatingListener) {
+ ((EarlyTerminatingListener) manager).onEarlyTermination(maxCountHits, forcedTermination);
+ }
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java
new file mode 100644
index 0000000000000..dd6793266a7ca
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/EarlyTerminatingListener.java
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+/**
+ * Early termination event listener. It is used during concurrent segment search
+ * to propagate the early termination intent.
+ */
+public interface EarlyTerminatingListener {
+ /**
+ * Early termination event notification
+ * @param maxCountHits desired maximum number of hits
+ * @param forcedTermination :true" if forced termination has been requested, "false" otherwise
+ */
+ void onEarlyTermination(int maxCountHits, boolean forcedTermination);
+}
diff --git a/server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java b/server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java
new file mode 100644
index 0000000000000..ef47cf2a388f3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/FilteredCollectorManager.java
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+import org.apache.lucene.search.Weight;
+import org.opensearch.common.lucene.search.FilteredCollector;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
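+/**
+ * A collector manager that wraps the collectors created by its delegate with {@link FilteredCollector},
+ * applying the given filter {@link Weight} to every collected document.
+ */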
+class FilteredCollectorManager implements CollectorManager<FilteredCollector, ReduceableSearchResult> {
+ private final CollectorManager<? extends Collector, ReduceableSearchResult> manager;
+ private final Weight filter;
+
+ FilteredCollectorManager(CollectorManager<? extends Collector, ReduceableSearchResult> manager, Weight filter) {
+ this.manager = manager;
+ this.filter = filter;
+ }
+
+ @Override
+ public FilteredCollector newCollector() throws IOException {
+ return new FilteredCollector(manager.newCollector(), filter);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public ReduceableSearchResult reduce(Collection<FilteredCollector> collectors) throws IOException {
+ final Collection<Collector> subCollectors = new ArrayList<>();
+
+ for (final FilteredCollector collector : collectors) {
+ subCollectors.add(collector.getCollector());
+ }
+
+ return ((CollectorManager<Collector, ReduceableSearchResult>) manager).reduce(subCollectors);
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java b/server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java
new file mode 100644
index 0000000000000..22b25222b639d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/MinimumCollectorManager.java
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+import org.opensearch.common.lucene.MinimumScoreCollector;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
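+/**
+ * A collector manager that wraps the collectors created by its delegate with {@link MinimumScoreCollector},
+ * discarding hits that score below the configured minimum score.
+ */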
+class MinimumCollectorManager implements CollectorManager<MinimumScoreCollector, ReduceableSearchResult> {
+ private final CollectorManager<? extends Collector, ReduceableSearchResult> manager;
+ private final float minimumScore;
+
+ MinimumCollectorManager(CollectorManager<? extends Collector, ReduceableSearchResult> manager, float minimumScore) {
+ this.manager = manager;
+ this.minimumScore = minimumScore;
+ }
+
+ @Override
+ public MinimumScoreCollector newCollector() throws IOException {
+ return new MinimumScoreCollector(manager.newCollector(), minimumScore);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public ReduceableSearchResult reduce(Collection<MinimumScoreCollector> collectors) throws IOException {
+ final Collection<Collector> subCollectors = new ArrayList<>();
+
+ for (final MinimumScoreCollector collector : collectors) {
+ subCollectors.add(collector.getCollector());
+ }
+
+ return ((CollectorManager<Collector, ReduceableSearchResult>) manager).reduce(subCollectors);
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java b/server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java
new file mode 100644
index 0000000000000..0ee423b48caeb
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/MultiCollectorWrapper.java
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.ScoreMode;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Wraps a MultiCollector and provides access to the underlying collectors.
+ * Please check out https://github.com/apache/lucene/pull/455.
+ */
+public class MultiCollectorWrapper implements Collector {
+ private final MultiCollector delegate;
+ private final Collection<Collector> collectors;
+
+ MultiCollectorWrapper(MultiCollector delegate, Collection<Collector> collectors) {
+ this.delegate = delegate;
+ this.collectors = collectors;
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ return delegate.getLeafCollector(context);
+ }
+
+ @Override
+ public ScoreMode scoreMode() {
+ return delegate.scoreMode();
+ }
+
+ public Collection<? extends Collector> getCollectors() {
+ return collectors;
+ }
+
+ public static Collector wrap(Collector... collectors) {
+ final List<Collector> collectorsList = Arrays.asList(collectors);
+ final Collector collector = MultiCollector.wrap(collectorsList);
+ if (collector instanceof MultiCollector) {
+ return new MultiCollectorWrapper((MultiCollector) collector, collectorsList);
+ } else {
+ return collector;
+ }
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java
index d1ff855888f0b..95ad514adf97d 100644
--- a/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java
+++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java
@@ -33,6 +33,7 @@
package org.opensearch.search.query;
import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
@@ -42,6 +43,7 @@
import org.opensearch.common.lucene.MinimumScoreCollector;
import org.opensearch.common.lucene.search.FilteredCollector;
import org.opensearch.search.profile.query.InternalProfileCollector;
+import org.opensearch.search.profile.query.InternalProfileCollectorManager;
import java.io.IOException;
import java.util.ArrayList;
@@ -54,7 +56,7 @@
import static org.opensearch.search.profile.query.CollectorResult.REASON_SEARCH_POST_FILTER;
import static org.opensearch.search.profile.query.CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT;
-abstract class QueryCollectorContext {
+public abstract class QueryCollectorContext {
private static final Collector EMPTY_COLLECTOR = new SimpleCollector() {
@Override
public void collect(int doc) {}
@@ -77,6 +79,8 @@ public ScoreMode scoreMode() {
*/
abstract Collector create(Collector in) throws IOException;
+ abstract CollectorManager<? extends Collector, ReduceableSearchResult> createManager(CollectorManager<? extends Collector, ReduceableSearchResult> in) throws IOException;
+
/**
* Wraps this collector with a profiler
*/
@@ -85,6 +89,18 @@ protected InternalProfileCollector createWithProfiler(InternalProfileCollector i
return new InternalProfileCollector(collector, profilerName, in != null ? Collections.singletonList(in) : Collections.emptyList());
}
+ /**
+ * Wraps this collector manager with a profiler
+ */
+ protected InternalProfileCollectorManager createWithProfiler(InternalProfileCollectorManager in) throws IOException {
+ final CollectorManager<? extends Collector, ReduceableSearchResult> manager = createManager(in);
+ return new InternalProfileCollectorManager(
+ manager,
+ profilerName,
+ in != null ? Collections.singletonList(in) : Collections.emptyList()
+ );
+ }
+
/**
     * Post-process result after search execution.
*
@@ -126,6 +142,11 @@ static QueryCollectorContext createMinScoreCollectorContext(float minScore) {
Collector create(Collector in) {
return new MinimumScoreCollector(in, minScore);
}
+
+ @Override
+ CollectorManager<? extends Collector, ReduceableSearchResult> createManager(CollectorManager<? extends Collector, ReduceableSearchResult> in) throws IOException {
+ return new MinimumCollectorManager(in, minScore);
+ }
};
}
@@ -139,35 +160,58 @@ Collector create(Collector in) throws IOException {
final Weight filterWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
return new FilteredCollector(in, filterWeight);
}
+
+ @Override
+ CollectorManager<? extends Collector, ReduceableSearchResult> createManager(CollectorManager<? extends Collector, ReduceableSearchResult> in) throws IOException {
+ final Weight filterWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
+ return new FilteredCollectorManager(in, filterWeight);
+ }
};
}
/**
- * Creates a multi collector from the provided subs
+ * Creates a multi collector manager from the provided subs
*/
- static QueryCollectorContext createMultiCollectorContext(Collection<Collector> subs) {
+ static QueryCollectorContext createMultiCollectorContext(
+ Collection<CollectorManager<? extends Collector, ReduceableSearchResult>> subs
+ ) {
return new QueryCollectorContext(REASON_SEARCH_MULTI) {
@Override
- Collector create(Collector in) {
+ Collector create(Collector in) throws IOException {
            List<Collector> subCollectors = new ArrayList<>();
subCollectors.add(in);
- subCollectors.addAll(subs);
+ for (CollectorManager<? extends Collector, ReduceableSearchResult> manager : subs) {
+ subCollectors.add(manager.newCollector());
+ }
return MultiCollector.wrap(subCollectors);
}
@Override
- protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) {
+ protected InternalProfileCollector createWithProfiler(InternalProfileCollector in) throws IOException {
            final List<InternalProfileCollector> subCollectors = new ArrayList<>();
subCollectors.add(in);
- if (subs.stream().anyMatch((col) -> col instanceof InternalProfileCollector == false)) {
- throw new IllegalArgumentException("non-profiling collector");
- }
- for (Collector collector : subs) {
+
+ for (CollectorManager<? extends Collector, ReduceableSearchResult> manager : subs) {
+ final Collector collector = manager.newCollector();
+ if (!(collector instanceof InternalProfileCollector)) {
+ throw new IllegalArgumentException("non-profiling collector");
+ }
subCollectors.add((InternalProfileCollector) collector);
}
+
final Collector collector = MultiCollector.wrap(subCollectors);
return new InternalProfileCollector(collector, REASON_SEARCH_MULTI, subCollectors);
}
+
+ @Override
+ CollectorManager<? extends Collector, ReduceableSearchResult> createManager(
+ CollectorManager<? extends Collector, ReduceableSearchResult> in
+ ) throws IOException {
+ final List<CollectorManager<? extends Collector, ReduceableSearchResult>> managers = new ArrayList<>();
+ managers.add(in);
+ managers.addAll(subs);
+ return QueryCollectorManagerContext.createOpaqueCollectorManager(managers);
+ }
};
}
@@ -192,6 +236,13 @@ Collector create(Collector in) {
this.collector = MultiCollector.wrap(subCollectors);
return collector;
}
+
+ @Override
+ CollectorManager<? extends Collector, ReduceableSearchResult> createManager(
+ CollectorManager<? extends Collector, ReduceableSearchResult> in
+ ) throws IOException {
+ return new EarlyTerminatingCollectorManager<>(in, numHits, true);
+ }
};
}
}
diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java
new file mode 100644
index 0000000000000..c98f4884bb030
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorManagerContext.java
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
+import org.apache.lucene.search.MultiCollectorManager;
+import org.opensearch.search.profile.query.InternalProfileCollectorManager;
+import org.opensearch.search.profile.query.ProfileCollectorManager;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
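+/**
+ * Helpers for composing the {@link CollectorManager}s produced by {@link QueryCollectorContext} instances
+ * into a single manager for the query phase.
+ */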
+public abstract class QueryCollectorManagerContext {
+ private static class QueryCollectorManager implements CollectorManager<Collector, ReduceableSearchResult> {
+ private final MultiCollectorManager manager;
+
+ private QueryCollectorManager(Collection<CollectorManager<? extends Collector, ReduceableSearchResult>> managers) {
+ this.manager = new MultiCollectorManager(managers.toArray(new CollectorManager<?, ?>[0]));
+ }
+
+ @Override
+ public Collector newCollector() throws IOException {
+ return manager.newCollector();
+ }
+
+ @Override
+ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
+ final Object[] results = manager.reduce(collectors);
+
+ final ReduceableSearchResult[] transformed = new ReduceableSearchResult[results.length];
+ for (int i = 0; i < results.length; ++i) {
+ assert results[i] instanceof ReduceableSearchResult;
+ transformed[i] = (ReduceableSearchResult) results[i];
+ }
+
+ return reduceWith(transformed);
+ }
+
+ protected ReduceableSearchResult reduceWith(final ReduceableSearchResult[] results) {
+ return (QuerySearchResult result) -> {
+ for (final ReduceableSearchResult r : results) {
+ r.reduce(result);
+ }
+ };
+ }
+ }
+
+ private static class OpaqueQueryCollectorManager extends QueryCollectorManager {
+ private OpaqueQueryCollectorManager(Collection<CollectorManager<? extends Collector, ReduceableSearchResult>> managers) {
+ super(managers);
+ }
+
+ @Override
+ protected ReduceableSearchResult reduceWith(final ReduceableSearchResult[] results) {
+ return (QuerySearchResult result) -> {};
+ }
+ }
+
+ public static CollectorManager<? extends Collector, ReduceableSearchResult> createOpaqueCollectorManager(
+ List<CollectorManager<? extends Collector, ReduceableSearchResult>> managers
+ ) throws IOException {
+ return new OpaqueQueryCollectorManager(managers);
+ }
+
+ public static CollectorManager<? extends Collector, ReduceableSearchResult> createMultiCollectorManager(
+ List<QueryCollectorContext> collectors
+ ) throws IOException {
+ final Collection<CollectorManager<? extends Collector, ReduceableSearchResult>> managers = new ArrayList<>();
+
+ CollectorManager<? extends Collector, ReduceableSearchResult> manager = null;
+ for (QueryCollectorContext ctx : collectors) {
+ manager = ctx.createManager(manager);
+ managers.add(manager);
+ }
+
+ return new QueryCollectorManager(managers);
+ }
+
+ public static ProfileCollectorManager<? extends Collector, ReduceableSearchResult> createQueryCollectorManagerWithProfiler(
+ List<QueryCollectorContext> collectors
+ ) throws IOException {
+ InternalProfileCollectorManager manager = null;
+
+ for (QueryCollectorContext ctx : collectors) {
+ manager = ctx.createWithProfiler(manager);
+ }
+
+ return manager;
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java
index 3edbc16cd613f..1501067ec7983 100644
--- a/server/src/main/java/org/opensearch/search/query/QueryPhase.java
+++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java
@@ -238,9 +238,9 @@ static boolean executeInternal(SearchContext searchContext, QueryPhaseSearcher q
// this collector can filter documents during the collection
hasFilterCollector = true;
}
- if (searchContext.queryCollectors().isEmpty() == false) {
+ if (searchContext.queryCollectorManagers().isEmpty() == false) {
// plug in additional collectors, like aggregations
- collectors.add(createMultiCollectorContext(searchContext.queryCollectors().values()));
+ collectors.add(createMultiCollectorContext(searchContext.queryCollectorManagers().values()));
}
if (searchContext.minimumScore() != null) {
// apply the minimum score after multi collector so we filter aggs as well
diff --git a/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java b/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java
new file mode 100644
index 0000000000000..48e8d7198ea3b
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import java.io.IOException;
+
+/**
+ * The search result callback returned by the reduce phase of the collector manager.
+ */
+public interface ReduceableSearchResult {
+ /**
+ * Apply the reduce operation to the query search results
+ * @param result query search results
+ * @throws IOException exception if reduce operation failed
+ */
+ void reduce(QuerySearchResult result) throws IOException;
+}
diff --git a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java
index 9cf7dca3c4caf..5f19462a2c33a 100644
--- a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java
+++ b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java
@@ -44,6 +44,7 @@
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.FieldDoc;
@@ -80,6 +81,9 @@
import org.opensearch.search.sort.SortAndFormats;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Objects;
import java.util.function.Supplier;
@@ -89,7 +93,7 @@
/**
* A {@link QueryCollectorContext} that creates top docs collector
*/
-abstract class TopDocsCollectorContext extends QueryCollectorContext {
+public abstract class TopDocsCollectorContext extends QueryCollectorContext {
protected final int numHits;
TopDocsCollectorContext(String profilerName, int numHits) {
@@ -107,7 +111,7 @@ final int numHits() {
/**
* Returns true if the top docs should be re-scored after initial search
*/
- boolean shouldRescore() {
+ public boolean shouldRescore() {
return false;
}
@@ -115,6 +119,8 @@ static class EmptyTopDocsCollectorContext extends TopDocsCollectorContext {
private final Sort sort;
private final Collector collector;
        private final Supplier<TotalHits> hitCountSupplier;
+ private final int trackTotalHitsUpTo;
+ private final int hitCount;
/**
* Ctr
@@ -132,16 +138,18 @@ private EmptyTopDocsCollectorContext(
) throws IOException {
super(REASON_SEARCH_COUNT, 0);
this.sort = sortAndFormats == null ? null : sortAndFormats.sort;
- if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
+ this.trackTotalHitsUpTo = trackTotalHitsUpTo;
+ if (this.trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
this.collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 0, false);
// for bwc hit count is set to 0, it will be converted to -1 by the coordinating node
this.hitCountSupplier = () -> new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+ this.hitCount = Integer.MIN_VALUE;
} else {
TotalHitCountCollector hitCountCollector = new TotalHitCountCollector();
// implicit total hit counts are valid only when there is no filter collector in the chain
- int hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query);
- if (hitCount == -1) {
- if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_ACCURATE) {
+ this.hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query);
+ if (this.hitCount == -1) {
+ if (this.trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_ACCURATE) {
this.collector = hitCountCollector;
this.hitCountSupplier = () -> new TotalHits(hitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO);
} else {
@@ -159,6 +167,39 @@ private EmptyTopDocsCollectorContext(
}
}
+ @Override
+ CollectorManager<? extends Collector, ReduceableSearchResult> createManager(CollectorManager<? extends Collector, ReduceableSearchResult> in) throws IOException {
+ assert in == null;
+
+ CollectorManager<? extends Collector, ReduceableSearchResult> manager = null;
+
+ if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
+ manager = new EarlyTerminatingCollectorManager<>(
+ new TotalHitCountCollectorManager.Empty(new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), sort),
+ 0,
+ false
+ );
+ } else {
+ if (hitCount == -1) {
+ if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_ACCURATE) {
+ manager = new EarlyTerminatingCollectorManager<>(
+ new TotalHitCountCollectorManager(sort),
+ trackTotalHitsUpTo,
+ false
+ );
+ }
+ } else {
+ manager = new EarlyTerminatingCollectorManager<>(
+ new TotalHitCountCollectorManager.Empty(new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO), sort),
+ 0,
+ false
+ );
+ }
+ }
+
+ return manager;
+ }
+
@Override
Collector create(Collector in) {
assert in == null;
@@ -181,7 +222,11 @@ void postProcess(QuerySearchResult result) {
static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext {
private final DocValueFormat[] sortFmt;
        private final CollapsingTopDocsCollector<?> topDocsCollector;
+ private final Collector collector;
        private final Supplier<Float> maxScoreSupplier;
+ private final CollapseContext collapseContext;
+ private final boolean trackMaxScore;
+ private final Sort sort;
/**
* Ctr
@@ -199,30 +244,94 @@ private CollapsingTopDocsCollectorContext(
super(REASON_SEARCH_TOP_HITS, numHits);
assert numHits > 0;
assert collapseContext != null;
- Sort sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort;
+ this.sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort;
this.sortFmt = sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats;
+ this.collapseContext = collapseContext;
this.topDocsCollector = collapseContext.createTopDocs(sort, numHits);
+ this.trackMaxScore = trackMaxScore;
- MaxScoreCollector maxScoreCollector;
+ MaxScoreCollector maxScoreCollector = null;
if (trackMaxScore) {
maxScoreCollector = new MaxScoreCollector();
maxScoreSupplier = maxScoreCollector::getMaxScore;
} else {
+ maxScoreCollector = null;
maxScoreSupplier = () -> Float.NaN;
}
+
+ this.collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector);
}
@Override
Collector create(Collector in) throws IOException {
assert in == null;
- return topDocsCollector;
+ return collector;
}
@Override
void postProcess(QuerySearchResult result) throws IOException {
- CollapseTopFieldDocs topDocs = topDocsCollector.getTopDocs();
+ final CollapseTopFieldDocs topDocs = topDocsCollector.getTopDocs();
result.topDocs(new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get()), sortFmt);
}
+
+ @Override
+ CollectorManager<? extends Collector, ReduceableSearchResult> createManager(CollectorManager<? extends Collector, ReduceableSearchResult> in) throws IOException {
+ return new CollectorManager<Collector, ReduceableSearchResult>() {
+ @Override
+ public Collector newCollector() throws IOException {
+ MaxScoreCollector maxScoreCollector = null;
+
+ if (trackMaxScore) {
+ maxScoreCollector = new MaxScoreCollector();
+ }
+
+ return MultiCollectorWrapper.wrap(collapseContext.createTopDocs(sort, numHits), maxScoreCollector);
+ }
+
+ @Override
+ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
+ final Collection<Collector> subs = new ArrayList<>();
+ for (final Collector collector : collectors) {
+ if (collector instanceof MultiCollectorWrapper) {
+ subs.addAll(((MultiCollectorWrapper) collector).getCollectors());
+ } else {
+ subs.add(collector);
+ }
+ }
+
+ final Collection<CollapseTopFieldDocs> topFieldDocs = new ArrayList<>();
+ float maxScore = Float.NaN;
+
+ for (final Collector collector : subs) {
+ if (collector instanceof CollapsingTopDocsCollector<?>) {
+ topFieldDocs.add(((CollapsingTopDocsCollector<?>) collector).getTopDocs());
+ } else if (collector instanceof MaxScoreCollector) {
+ float score = ((MaxScoreCollector) collector).getMaxScore();
+ if (Float.isNaN(maxScore)) {
+ maxScore = score;
+ } else {
+ maxScore = Math.max(maxScore, score);
+ }
+ }
+ }
+
+ return reduceWith(topFieldDocs, maxScore);
+ }
+ };
+ }
+
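+ /**
+ * Merges the per-collector {@link CollapseTopFieldDocs} into a single collapsed result for the query phase.
+ */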
+ protected ReduceableSearchResult reduceWith(final Collection<CollapseTopFieldDocs> topFieldDocs, float maxScore) {
+ return (QuerySearchResult result) -> {
+ final CollapseTopFieldDocs topDocs = CollapseTopFieldDocs.merge(
+ sort,
+ 0,
+ numHits,
+ topFieldDocs.toArray(new CollapseTopFieldDocs[0]),
+ true
+ );
+ result.topDocs(new TopDocsAndMaxScore(topDocs, maxScore), sortFmt);
+ };
+ }
}
abstract static class SimpleTopDocsCollectorContext extends TopDocsCollectorContext {
@@ -240,11 +349,38 @@ private static TopDocsCollector<?> createCollector(
}
}
+ private static CollectorManager<? extends TopDocsCollector<?>, ? extends TopDocs> createCollectorManager(
+ @Nullable SortAndFormats sortAndFormats,
+ int numHits,
+ @Nullable ScoreDoc searchAfter,
+ int hitCountThreshold
+ ) {
+ if (sortAndFormats == null) {
+ // Please see https://github.com/apache/lucene/pull/450, should be fixed in 9.x
+ if (searchAfter != null) {
+ return TopScoreDocCollector.createSharedManager(
+ numHits,
+ new FieldDoc(searchAfter.doc, searchAfter.score),
+ hitCountThreshold
+ );
+ } else {
+ return TopScoreDocCollector.createSharedManager(numHits, null, hitCountThreshold);
+ }
+ } else {
+ return TopFieldCollector.createSharedManager(sortAndFormats.sort, numHits, (FieldDoc) searchAfter, hitCountThreshold);
+ }
+ }
+
protected final @Nullable SortAndFormats sortAndFormats;
private final Collector collector;
     private final Supplier<TotalHits> totalHitsSupplier;
     private final Supplier<TopDocs> topDocsSupplier;
     private final Supplier<Float> maxScoreSupplier;
+ private final ScoreDoc searchAfter;
+ private final int trackTotalHitsUpTo;
+ private final boolean trackMaxScore;
+ private final boolean hasInfMaxScore;
+ private final int hitCount;
/**
* Ctr
@@ -269,24 +405,30 @@ private SimpleTopDocsCollectorContext(
) throws IOException {
super(REASON_SEARCH_TOP_HITS, numHits);
this.sortAndFormats = sortAndFormats;
+ this.searchAfter = searchAfter;
+ this.trackTotalHitsUpTo = trackTotalHitsUpTo;
+ this.trackMaxScore = trackMaxScore;
+ this.hasInfMaxScore = hasInfMaxScore(query);
        final TopDocsCollector<?> topDocsCollector;
- if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore(query)) {
+ if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore) {
// disable max score optimization since we have a mandatory clause
// that doesn't track the maximum score
topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, Integer.MAX_VALUE);
topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs);
totalHitsSupplier = () -> topDocsSupplier.get().totalHits;
+ hitCount = Integer.MIN_VALUE;
} else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
// don't compute hit counts via the collector
topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, 1);
topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs);
totalHitsSupplier = () -> new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+ hitCount = -1;
} else {
// implicit total hit counts are valid only when there is no filter collector in the chain
- final int hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query);
- if (hitCount == -1) {
+ this.hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query);
+ if (this.hitCount == -1) {
topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, trackTotalHitsUpTo);
topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs);
totalHitsSupplier = () -> topDocsSupplier.get().totalHits;
@@ -294,7 +436,7 @@ private SimpleTopDocsCollectorContext(
// don't compute hit counts via the collector
topDocsCollector = createCollector(sortAndFormats, numHits, searchAfter, 1);
topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs);
- totalHitsSupplier = () -> new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO);
+ totalHitsSupplier = () -> new TotalHits(this.hitCount, TotalHits.Relation.EQUAL_TO);
}
}
MaxScoreCollector maxScoreCollector = null;
@@ -315,7 +457,98 @@ private SimpleTopDocsCollectorContext(
}
this.collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector);
+ }
+
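+ /**
+ * Collector manager for the top-hits phase: each {@link #newCollector()} call produces a top docs (plus
+ * optional max score) collector, and {@link #reduce(Collection)} merges their results into a single
+ * {@link ReduceableSearchResult}.
+ */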
+ private class SimpleTopDocsCollectorManager
+ implements
+ CollectorManager<Collector, ReduceableSearchResult>,
+ EarlyTerminatingListener {
+ private Integer terminatedAfter;
+ private final CollectorManager<? extends TopDocsCollector<?>, ? extends TopDocs> manager;
+
+ private SimpleTopDocsCollectorManager() {
+ if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore) {
+ // disable max score optimization since we have a mandatory clause
+ // that doesn't track the maximum score
+ manager = createCollectorManager(sortAndFormats, numHits, searchAfter, Integer.MAX_VALUE);
+ } else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
+ // don't compute hit counts via the collector
+ manager = createCollectorManager(sortAndFormats, numHits, searchAfter, 1);
+ } else {
+ // implicit total hit counts are valid only when there is no filter collector in the chain
+ if (hitCount == -1) {
+ manager = createCollectorManager(sortAndFormats, numHits, searchAfter, trackTotalHitsUpTo);
+ } else {
+ // don't compute hit counts via the collector
+ manager = createCollectorManager(sortAndFormats, numHits, searchAfter, 1);
+ }
+ }
+ }
+
+ @Override
+ public void onEarlyTermination(int maxCountHits, boolean forcedTermination) {
+ terminatedAfter = maxCountHits;
+ }
+
+ @Override
+ public Collector newCollector() throws IOException {
+ MaxScoreCollector maxScoreCollector = null;
+
+ if (sortAndFormats != null && trackMaxScore) {
+ maxScoreCollector = new MaxScoreCollector();
+ }
+
+ return MultiCollectorWrapper.wrap(manager.newCollector(), maxScoreCollector);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException {
+ final Collection<TopDocsCollector<?>> topDocsCollectors = new ArrayList<>();
+ final Collection<MaxScoreCollector> maxScoreCollectors = new ArrayList<>();
+
+ for (final Collector collector : collectors) {
+ if (collector instanceof MultiCollectorWrapper) {
+ for (final Collector sub : (((MultiCollectorWrapper) collector).getCollectors())) {
+ if (sub instanceof TopDocsCollector<?>) {
+ topDocsCollectors.add((TopDocsCollector<?>) sub);
+ } else if (sub instanceof MaxScoreCollector) {
+ maxScoreCollectors.add((MaxScoreCollector) sub);
+ }
+ }
+ } else if (collector instanceof TopDocsCollector<?>) {
+ topDocsCollectors.add((TopDocsCollector<?>) collector);
+ } else if (collector instanceof MaxScoreCollector) {
+ maxScoreCollectors.add((MaxScoreCollector) collector);
+ }
+ }
+
+ float maxScore = Float.NaN;
+ for (final MaxScoreCollector collector : maxScoreCollectors) {
+ float score = collector.getMaxScore();
+ if (Float.isNaN(maxScore)) {
+ maxScore = score;
+ } else {
+ maxScore = Math.max(maxScore, score);
+ }
+ }
+ final TopDocs topDocs = ((CollectorManager<TopDocsCollector<?>, ? extends TopDocs>) manager).reduce(topDocsCollectors);
+ return reduceWith(topDocs, maxScore, terminatedAfter);
+ }
+ }
+
+ @Override
+ CollectorManager<? extends Collector, ReduceableSearchResult> createManager(CollectorManager<? extends Collector, ReduceableSearchResult> in) throws IOException {
+ assert in == null;
+ return new SimpleTopDocsCollectorManager();
+ }
+
+ protected ReduceableSearchResult reduceWith(final TopDocs topDocs, final float maxScore, final Integer terminatedAfter) {
+ return (QuerySearchResult result) -> {
+ final TopDocsAndMaxScore topDocsAndMaxScore = newTopDocs(topDocs, maxScore, terminatedAfter);
+ result.topDocs(topDocsAndMaxScore, sortAndFormats == null ? null : sortAndFormats.formats);
+ };
}
@Override
@@ -324,6 +557,50 @@ Collector create(Collector in) {
return collector;
}
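+ /**
+ * Builds the {@link TopDocsAndMaxScore} from the reduced top docs, recomputing the total hit count according
+ * to the hit count tracking mode and truncating hits when early termination had to be simulated.
+ */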
+ TopDocsAndMaxScore newTopDocs(final TopDocs topDocs, final float maxScore, final Integer terminatedAfter) {
+ TotalHits totalHits = null;
+
+ if ((sortAndFormats == null || SortField.FIELD_SCORE.equals(sortAndFormats.sort.getSort()[0])) && hasInfMaxScore) {
+ totalHits = topDocs.totalHits;
+ } else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
+ // don't compute hit counts via the collector
+ totalHits = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+ } else {
+ if (hitCount == -1) {
+ totalHits = topDocs.totalHits;
+ } else {
+ totalHits = new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO);
+ }
+ }
+
+ // Since we cannot support early forced termination, we have to simulate it by
+ // artificially reducing the number of total hits and doc scores.
+ ScoreDoc[] scoreDocs = topDocs.scoreDocs;
+ if (terminatedAfter != null) {
+ if (totalHits.value > terminatedAfter) {
+ totalHits = new TotalHits(terminatedAfter, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+ }
+
+ if (scoreDocs != null && scoreDocs.length > terminatedAfter) {
+ scoreDocs = Arrays.copyOf(scoreDocs, terminatedAfter);
+ }
+ }
+
+ final TopDocs newTopDocs;
+ if (topDocs instanceof TopFieldDocs) {
+ TopFieldDocs fieldDocs = (TopFieldDocs) topDocs;
+ newTopDocs = new TopFieldDocs(totalHits, scoreDocs, fieldDocs.fields);
+ } else {
+ newTopDocs = new TopDocs(totalHits, scoreDocs);
+ }
+
+ if (Float.isNaN(maxScore) && newTopDocs.scoreDocs.length > 0 && sortAndFormats == null) {
+ return new TopDocsAndMaxScore(newTopDocs, newTopDocs.scoreDocs[0].score);
+ } else {
+ return new TopDocsAndMaxScore(newTopDocs, maxScore);
+ }
+ }
+
TopDocsAndMaxScore newTopDocs() {
TopDocs in = topDocsSupplier.get();
float maxScore = maxScoreSupplier.get();
@@ -373,6 +650,35 @@ private ScrollingTopDocsCollectorContext(
this.numberOfShards = numberOfShards;
}
+ @Override
+ protected ReduceableSearchResult reduceWith(final TopDocs topDocs, final float maxScore, final Integer terminatedAfter) {
+ return (QuerySearchResult result) -> {
+ final TopDocsAndMaxScore topDocsAndMaxScore = newTopDocs(topDocs, maxScore, terminatedAfter);
+
+ if (scrollContext.totalHits == null) {
+ // first round
+ scrollContext.totalHits = topDocsAndMaxScore.topDocs.totalHits;
+ scrollContext.maxScore = topDocsAndMaxScore.maxScore;
+ } else {
+ // subsequent round: the total number of hits and
+ // the maximum score were computed on the first round
+ topDocsAndMaxScore.topDocs.totalHits = scrollContext.totalHits;
+ topDocsAndMaxScore.maxScore = scrollContext.maxScore;
+ }
+
+ if (numberOfShards == 1) {
+ // if we fetch the document in the same roundtrip, we already know the last emitted doc
+ if (topDocsAndMaxScore.topDocs.scoreDocs.length > 0) {
+ // set the last emitted doc
+ scrollContext.lastEmittedDoc = topDocsAndMaxScore.topDocs.scoreDocs[topDocsAndMaxScore.topDocs.scoreDocs.length
+ - 1];
+ }
+ }
+
+ result.topDocs(topDocsAndMaxScore, sortAndFormats == null ? null : sortAndFormats.formats);
+ };
+ }
+
@Override
void postProcess(QuerySearchResult result) throws IOException {
final TopDocsAndMaxScore topDocs = newTopDocs();
@@ -457,7 +763,7 @@ static int shortcutTotalHitCount(IndexReader reader, Query query) throws IOExcep
     * Creates a {@link TopDocsCollectorContext} from the provided searchContext.
     * @param hasFilterCollector True if the collector chain contains at least one collector that can filter documents.
*/
- static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searchContext, boolean hasFilterCollector)
+ public static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searchContext, boolean hasFilterCollector)
throws IOException {
final IndexReader reader = searchContext.searcher().getIndexReader();
final Query query = searchContext.query();
@@ -515,7 +821,7 @@ static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searc
hasFilterCollector
) {
@Override
- boolean shouldRescore() {
+ public boolean shouldRescore() {
return rescore;
}
};
diff --git a/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java b/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java
new file mode 100644
index 0000000000000..6d4159c977743
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/query/TotalHitCountCollectorManager.java
@@ -0,0 +1,106 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import org.apache.lucene.search.CollectorManager;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopFieldDocs;
+import org.apache.lucene.search.TotalHitCountCollector;
+import org.apache.lucene.search.TotalHits;
+import org.opensearch.common.lucene.Lucene;
+import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
+
+import java.io.IOException;
+import java.util.Collection;
+
+public class TotalHitCountCollectorManager
+    implements
+        CollectorManager<TotalHitCountCollector, ReduceableSearchResult>,
+        EarlyTerminatingListener {
+
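+    // no-op collector handed out by Empty below: the hit count is already known, so nothing needs to be collected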
+ private static final TotalHitCountCollector EMPTY_COLLECTOR = new TotalHitCountCollector() {
+ @Override
+ public void collect(int doc) {}
+
+ @Override
+ public ScoreMode scoreMode() {
+ return ScoreMode.COMPLETE_NO_SCORES;
+ }
+ };
+
+ private final Sort sort;
+ private Integer terminatedAfter;
+
+ public TotalHitCountCollectorManager(final Sort sort) {
+ this.sort = sort;
+ }
+
+ @Override
+ public void onEarlyTermination(int maxCountHits, boolean forcedTermination) {
+ terminatedAfter = maxCountHits;
+ }
+
+ @Override
+ public TotalHitCountCollector newCollector() throws IOException {
+ return new TotalHitCountCollector();
+ }
+
+ @Override
+    public ReduceableSearchResult reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
+ return (QuerySearchResult result) -> {
+ final TotalHits.Relation relation = (terminatedAfter != null)
+ ? TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO
+ : TotalHits.Relation.EQUAL_TO;
+
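+            // sum the hit counts of all collectors, then clamp at the early-termination threshold if one was recorded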
+ int totalHits = collectors.stream().mapToInt(TotalHitCountCollector::getTotalHits).sum();
+ if (terminatedAfter != null && totalHits > terminatedAfter) {
+ totalHits = terminatedAfter;
+ }
+
+ final TotalHits totalHitCount = new TotalHits(totalHits, relation);
+ final TopDocs topDocs = (sort != null)
+ ? new TopFieldDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, sort.getSort())
+ : new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS);
+
+ result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), null);
+ };
+ }
+
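+    // used when the total hit count is known up front: collects nothing and reports the precomputed hit count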
+    static class Empty implements CollectorManager<TotalHitCountCollector, ReduceableSearchResult> {
+ private final TotalHits totalHits;
+ private final Sort sort;
+
+ Empty(final TotalHits totalHits, final Sort sort) {
+ this.totalHits = totalHits;
+ this.sort = sort;
+ }
+
+ @Override
+ public TotalHitCountCollector newCollector() throws IOException {
+ return EMPTY_COLLECTOR;
+ }
+
+ @Override
+        public ReduceableSearchResult reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
+ return (QuerySearchResult result) -> {
+ final TopDocs topDocs;
+
+ if (sort != null) {
+ topDocs = new TopFieldDocs(totalHits, Lucene.EMPTY_SCORE_DOCS, sort.getSort());
+ } else {
+ topDocs = new TopDocs(totalHits, Lucene.EMPTY_SCORE_DOCS);
+ }
+
+ result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), null);
+ };
+ }
+ }
+}
diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java
index e1cf74bdd6aeb..f6ca12f1c514c 100644
--- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java
+++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java
@@ -32,6 +32,8 @@
package org.opensearch.search;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.tests.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
@@ -76,7 +78,12 @@
import org.opensearch.threadpool.ThreadPool;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
@@ -91,6 +98,25 @@
import static org.mockito.Mockito.when;
public class DefaultSearchContextTests extends OpenSearchTestCase {
+ private final ExecutorService executor;
+
+ @ParametersFactory
+    public static Collection<Object[]> concurrency() {
+ return Arrays.asList(new Integer[] { 0 }, new Integer[] { 5 });
+ }
+
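+    // concurrency == 0 exercises the sequential path (no executor); a positive value runs searches on a fixed thread pool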
+ public DefaultSearchContextTests(int concurrency) {
+ this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null;
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+
+ if (executor != null) {
+ ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
+ }
+ }
public void testPreProcess() throws Exception {
TimeValue timeout = new TimeValue(randomIntBetween(1, 100));
@@ -183,7 +209,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
false,
Version.CURRENT,
false,
- null
+ executor
);
contextWithoutScroll.from(300);
contextWithoutScroll.close();
@@ -225,7 +251,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
false,
Version.CURRENT,
false,
- null
+ executor
);
context1.from(300);
exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
@@ -295,7 +321,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
false,
Version.CURRENT,
false,
- null
+ executor
);
SliceBuilder sliceBuilder = mock(SliceBuilder.class);
@@ -334,7 +360,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
false,
Version.CURRENT,
false,
- null
+ executor
);
ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery();
context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false);
@@ -365,7 +391,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
false,
Version.CURRENT,
false,
- null
+ executor
);
context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(false);
Query query1 = context4.query();
@@ -446,7 +472,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
false,
Version.CURRENT,
false,
- null
+ executor
);
assertThat(context.searcher().hasCancellations(), is(false));
context.searcher().addQueryCancellation(() -> {});
diff --git a/server/src/test/java/org/opensearch/search/SearchCancellationTests.java b/server/src/test/java/org/opensearch/search/SearchCancellationTests.java
index 1927558f94094..f479f3a1b99f1 100644
--- a/server/src/test/java/org/opensearch/search/SearchCancellationTests.java
+++ b/server/src/test/java/org/opensearch/search/SearchCancellationTests.java
@@ -108,7 +108,8 @@ public void testAddingCancellationActions() throws IOException {
IndexSearcher.getDefaultSimilarity(),
IndexSearcher.getDefaultQueryCache(),
IndexSearcher.getDefaultQueryCachingPolicy(),
- true
+ true,
+ null
);
NullPointerException npe = expectThrows(NullPointerException.class, () -> searcher.addQueryCancellation(null));
assertEquals("cancellation runnable should not be null", npe.getMessage());
@@ -127,7 +128,8 @@ public void testCancellableCollector() throws IOException {
IndexSearcher.getDefaultSimilarity(),
IndexSearcher.getDefaultQueryCache(),
IndexSearcher.getDefaultQueryCachingPolicy(),
- true
+ true,
+ null
);
searcher.search(new MatchAllDocsQuery(), collector1);
@@ -154,7 +156,8 @@ public void testExitableDirectoryReader() throws IOException {
IndexSearcher.getDefaultSimilarity(),
IndexSearcher.getDefaultQueryCache(),
IndexSearcher.getDefaultQueryCachingPolicy(),
- true
+ true,
+ null
);
searcher.addQueryCancellation(cancellation);
CompiledAutomaton automaton = new CompiledAutomaton(new RegExp("a.*").toAutomaton());
diff --git a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java
index de0a31b9dc04b..eb7dde4b0b2ce 100644
--- a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java
+++ b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java
@@ -258,7 +258,8 @@ public void onRemoval(ShardId shardId, Accountable accountable) {
IndexSearcher.getDefaultSimilarity(),
IndexSearcher.getDefaultQueryCache(),
IndexSearcher.getDefaultQueryCachingPolicy(),
- true
+ true,
+ null
);
for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
index afaab15e1431e..7f4dcdaed2aa1 100644
--- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
+++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java
@@ -32,8 +32,6 @@
package org.opensearch.search.profile.query;
-import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
@@ -64,18 +62,12 @@
import org.opensearch.search.internal.ContextIndexSearcher;
import org.opensearch.search.profile.ProfileResult;
import org.opensearch.test.OpenSearchTestCase;
-import org.opensearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@@ -85,16 +77,6 @@ public class QueryProfilerTests extends OpenSearchTestCase {
private Directory dir;
private IndexReader reader;
private ContextIndexSearcher searcher;
- private ExecutorService executor;
-
- @ParametersFactory
-    public static Collection<Object[]> concurrency() {
- return Arrays.asList(new Integer[] { 0 }, new Integer[] { 5 });
- }
-
- public QueryProfilerTests(int concurrency) {
- this.executor = (concurrency > 0) ? Executors.newFixedThreadPool(concurrency) : null;
- }
@Before
public void setUp() throws Exception {
@@ -120,7 +102,7 @@ public void setUp() throws Exception {
IndexSearcher.getDefaultQueryCache(),
ALWAYS_CACHE_POLICY,
true,
- executor
+ null
);
}
@@ -134,10 +116,6 @@ public void tearDown() throws Exception {
assertThat(cache.getTotalCount(), equalTo(cache.getMissCount()));
assertThat(cache.getCacheSize(), equalTo(0L));
- if (executor != null) {
- ThreadPool.terminate(executor, 10, TimeUnit.SECONDS);
- }
-
IOUtils.close(reader, dir);
dir = null;
reader = null;
@@ -145,7 +123,7 @@ public void tearDown() throws Exception {
}
public void testBasic() throws IOException {
- QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch());
+ QueryProfiler profiler = new QueryProfiler(false);
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.search(query, 1);
@@ -171,7 +149,7 @@ public void testBasic() throws IOException {
}
public void testNoScoring() throws IOException {
- QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch());
+ QueryProfiler profiler = new QueryProfiler(false);
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed
@@ -197,7 +175,7 @@ public void testNoScoring() throws IOException {
}
public void testUseIndexStats() throws IOException {
- QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch());
+ QueryProfiler profiler = new QueryProfiler(false);
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.count(query); // will use index stats
@@ -211,7 +189,7 @@ public void testUseIndexStats() throws IOException {
}
public void testApproximations() throws IOException {
- QueryProfiler profiler = new QueryProfiler(searcher.allowConcurrentSegmentSearch());
+ QueryProfiler profiler = new QueryProfiler(false);
searcher.setProfiler(profiler);
Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random());
searcher.count(query);
diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java
index b87c11dce5be2..1232347edea64 100644
--- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java
+++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java
@@ -39,6 +39,7 @@
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
@@ -77,6 +78,7 @@
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.store.Directory;
@@ -88,12 +90,15 @@
import org.opensearch.index.mapper.MappedFieldType;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.mapper.NumberFieldMapper;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberType;
import org.opensearch.index.query.ParsedQuery;
import org.opensearch.index.query.QueryShardContext;
import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.IndexShardTestCase;
import org.opensearch.search.DocValueFormat;
+import org.opensearch.search.collapse.CollapseBuilder;
import org.opensearch.search.internal.ContextIndexSearcher;
import org.opensearch.search.internal.ScrollContext;
import org.opensearch.search.internal.SearchContext;
@@ -144,7 +149,7 @@ private void countTestCase(Query query, IndexReader reader, boolean shouldCollec
context.parsedQuery(new ParsedQuery(query));
context.setSize(0);
context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
- final boolean rescore = QueryPhase.executeInternal(context);
+ final boolean rescore = QueryPhase.executeInternal(context.withCleanQueryResult());
assertFalse(rescore);
ContextIndexSearcher countSearcher = shouldCollectCount
@@ -157,7 +162,7 @@ private void countTestCase(boolean withDeletions) throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
- final int numDocs = scaledRandomIntBetween(100, 200);
+ final int numDocs = scaledRandomIntBetween(600, 900);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
if (randomBoolean()) {
@@ -228,12 +233,12 @@ public void testPostFilterDisablesCountOptimization() throws Exception {
context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
context.setSearcher(newContextSearcher(reader));
context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery()));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
reader.close();
dir.close();
@@ -261,7 +266,7 @@ public void testTerminateAfterWithFilter() throws Exception {
context.setSize(10);
for (int i = 0; i < 10; i++) {
context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i)))));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
}
@@ -283,12 +288,13 @@ public void testMinScoreDisablesCountOptimization() throws Exception {
context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
context.setSize(0);
context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
context.minimumScore(100);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation);
reader.close();
dir.close();
}
@@ -297,7 +303,7 @@ public void testQueryCapturesThreadPoolStats() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
- final int numDocs = scaledRandomIntBetween(100, 200);
+ final int numDocs = scaledRandomIntBetween(600, 900);
for (int i = 0; i < numDocs; ++i) {
w.addDocument(new Document());
}
@@ -307,7 +313,7 @@ public void testQueryCapturesThreadPoolStats() throws Exception {
context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
QuerySearchResult results = context.queryResult();
assertThat(results.serviceTimeEWMA(), greaterThanOrEqualTo(0L));
assertThat(results.nodeQueueSize(), greaterThanOrEqualTo(0));
@@ -320,7 +326,7 @@ public void testInOrderScrollOptimization() throws Exception {
final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
- final int numDocs = scaledRandomIntBetween(100, 200);
+ final int numDocs = scaledRandomIntBetween(600, 900);
for (int i = 0; i < numDocs; ++i) {
w.addDocument(new Document());
}
@@ -336,14 +342,14 @@ public void testInOrderScrollOptimization() throws Exception {
int size = randomIntBetween(2, 5);
context.setSize(size);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
assertNull(context.queryResult().terminatedEarly());
assertThat(context.terminateAfter(), equalTo(0));
assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
context.setSearcher(newEarlyTerminationContextSearcher(reader, size));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
assertThat(context.terminateAfter(), equalTo(size));
assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
@@ -356,7 +362,7 @@ public void testTerminateAfterEarlyTermination() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
- final int numDocs = scaledRandomIntBetween(100, 200);
+ final int numDocs = scaledRandomIntBetween(600, 900);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
if (randomBoolean()) {
@@ -377,25 +383,25 @@ public void testTerminateAfterEarlyTermination() throws Exception {
context.terminateAfter(numDocs);
{
context.setSize(10);
- TotalHitCountCollector collector = new TotalHitCountCollector();
- context.queryCollectors().put(TotalHitCountCollector.class, collector);
- QueryPhase.executeInternal(context);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create();
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertFalse(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10));
- assertThat(collector.getTotalHits(), equalTo(numDocs));
+ assertThat(manager.getTotalHits(), equalTo(numDocs));
}
context.terminateAfter(1);
{
context.setSize(1);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
context.setSize(0);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
@@ -403,7 +409,7 @@ public void testTerminateAfterEarlyTermination() throws Exception {
{
context.setSize(1);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
@@ -414,38 +420,38 @@ public void testTerminateAfterEarlyTermination() throws Exception {
.add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
.build();
context.parsedQuery(new ParsedQuery(bq));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
context.setSize(0);
context.parsedQuery(new ParsedQuery(bq));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
}
{
context.setSize(1);
- TotalHitCountCollector collector = new TotalHitCountCollector();
- context.queryCollectors().put(TotalHitCountCollector.class, collector);
- QueryPhase.executeInternal(context);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create();
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
- assertThat(collector.getTotalHits(), equalTo(1));
- context.queryCollectors().clear();
+ assertThat(manager.getTotalHits(), equalTo(1));
+ context.queryCollectorManagers().clear();
}
{
context.setSize(0);
- TotalHitCountCollector collector = new TotalHitCountCollector();
- context.queryCollectors().put(TotalHitCountCollector.class, collector);
- QueryPhase.executeInternal(context);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create();
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
- assertThat(collector.getTotalHits(), equalTo(1));
+ assertThat(manager.getTotalHits(), equalTo(1));
}
// tests with trackTotalHits and terminateAfter
@@ -453,9 +459,9 @@ public void testTerminateAfterEarlyTermination() throws Exception {
context.setSize(0);
for (int trackTotalHits : new int[] { -1, 3, 76, 100 }) {
context.trackTotalHitsUpTo(trackTotalHits);
- TotalHitCountCollector collector = new TotalHitCountCollector();
- context.queryCollectors().put(TotalHitCountCollector.class, collector);
- QueryPhase.executeInternal(context);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create();
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
if (trackTotalHits == -1) {
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L));
@@ -463,16 +469,14 @@ public void testTerminateAfterEarlyTermination() throws Exception {
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) Math.min(trackTotalHits, 10)));
}
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
- assertThat(collector.getTotalHits(), equalTo(10));
+ assertThat(manager.getTotalHits(), equalTo(10));
}
context.terminateAfter(7);
context.setSize(10);
for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) {
context.trackTotalHitsUpTo(trackTotalHits);
- EarlyTerminatingCollector collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 1, false);
- context.queryCollectors().put(EarlyTerminatingCollector.class, collector);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertTrue(context.queryResult().terminatedEarly());
if (trackTotalHits == -1) {
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L));
@@ -490,7 +494,7 @@ public void testIndexSortingEarlyTermination() throws Exception {
final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
- final int numDocs = scaledRandomIntBetween(100, 200);
+ final int numDocs = scaledRandomIntBetween(600, 900);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
if (randomBoolean()) {
@@ -511,7 +515,7 @@ public void testIndexSortingEarlyTermination() throws Exception {
context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
@@ -520,7 +524,7 @@ public void testIndexSortingEarlyTermination() throws Exception {
{
context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1)));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertNull(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
@@ -528,28 +532,28 @@ public void testIndexSortingEarlyTermination() throws Exception {
assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
context.parsedPostFilter(null);
- final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
- context.queryCollectors().put(TotalHitCountCollector.class, totalHitCountCollector);
- QueryPhase.executeInternal(context);
+ final TestTotalHitCountCollectorManager manager = TestTotalHitCountCollectorManager.create(sort);
+ context.queryCollectorManagers().put(TotalHitCountCollector.class, manager);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertNull(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
- assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs));
- context.queryCollectors().clear();
+ assertThat(manager.getTotalHits(), equalTo(numDocs));
+ context.queryCollectorManagers().clear();
}
{
context.setSearcher(newEarlyTerminationContextSearcher(reader, 1));
context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertNull(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertNull(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
@@ -564,7 +568,7 @@ public void testIndexSortScrollOptimization() throws Exception {
final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT));
IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort);
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
- final int numDocs = scaledRandomIntBetween(100, 200);
+ final int numDocs = scaledRandomIntBetween(600, 900);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
doc.add(new NumericDocValuesField("rank", random().nextInt()));
@@ -592,7 +596,7 @@ public void testIndexSortScrollOptimization() throws Exception {
context.setSize(10);
context.sort(searchSortAndFormat);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
assertNull(context.queryResult().terminatedEarly());
assertThat(context.terminateAfter(), equalTo(0));
@@ -601,7 +605,7 @@ public void testIndexSortScrollOptimization() throws Exception {
FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1];
context.setSearcher(newEarlyTerminationContextSearcher(reader, 10));
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertNull(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
assertThat(context.terminateAfter(), equalTo(0));
@@ -630,7 +634,8 @@ public void testDisableTopScoreCollection() throws Exception {
IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
- for (int i = 0; i < 10; i++) {
+ final int numDocs = 2 * scaledRandomIntBetween(50, 450);
+ for (int i = 0; i < numDocs; i++) {
doc.clear();
if (i % 2 == 0) {
doc.add(new TextField("title", "foo bar", Store.NO));
@@ -653,16 +658,16 @@ public void testDisableTopScoreCollection() throws Exception {
context.trackTotalHitsUpTo(3);
TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE);
- QueryPhase.executeInternal(context);
- assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO);
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }));
topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS);
- QueryPhase.executeInternal(context);
- assertEquals(5, context.queryResult().topDocs().topDocs.totalHits.value);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
@@ -724,7 +729,7 @@ public void testEnhanceSortOnNumeric() throws Exception {
searchContext.parsedQuery(query);
searchContext.setTask(task);
searchContext.setSize(10);
- QueryPhase.executeInternal(searchContext);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult());
assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false);
}
@@ -736,7 +741,7 @@ public void testEnhanceSortOnNumeric() throws Exception {
searchContext.parsedQuery(query);
searchContext.setTask(task);
searchContext.setSize(10);
- QueryPhase.executeInternal(searchContext);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult());
assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, true);
}
@@ -748,7 +753,7 @@ public void testEnhanceSortOnNumeric() throws Exception {
searchContext.parsedQuery(query);
searchContext.setTask(task);
searchContext.setSize(10);
- QueryPhase.executeInternal(searchContext);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult());
assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false);
}
@@ -773,7 +778,7 @@ public void testEnhanceSortOnNumeric() throws Exception {
searchContext.setTask(task);
searchContext.from(5);
searchContext.setSize(0);
- QueryPhase.executeInternal(searchContext);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult());
assertSortResults(searchContext.queryResult().topDocs().topDocs, (long) numDocs, false);
}
@@ -800,11 +805,15 @@ public void testEnhanceSortOnNumeric() throws Exception {
searchContext.parsedQuery(query);
searchContext.setTask(task);
searchContext.setSize(10);
- QueryPhase.executeInternal(searchContext);
+ QueryPhase.executeInternal(searchContext.withCleanQueryResult());
final TopDocs topDocs = searchContext.queryResult().topDocs().topDocs;
long topValue = (long) ((FieldDoc) topDocs.scoreDocs[0]).fields[0];
assertThat(topValue, greaterThan(afterValue));
assertSortResults(topDocs, (long) numDocs, false);
+
+ final TotalHits totalHits = topDocs.totalHits;
+ assertEquals(TotalHits.Relation.EQUAL_TO, totalHits.relation);
+ assertEquals(numDocs, totalHits.value);
}
reader.close();
@@ -916,13 +925,133 @@ public void testMinScore() throws Exception {
context.setSize(1);
context.trackTotalHitsUpTo(5);
- QueryPhase.executeInternal(context);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value);
reader.close();
dir.close();
}
+ public void testMaxScore() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO));
+ doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? " " + Integer.toString(i) : ""))));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+ context.trackScores(true);
+ context.parsedQuery(
+ new ParsedQuery(
+ new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD)
+ .build()
+ )
+ );
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(1);
+ context.trackTotalHitsUpTo(5);
+
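+        // a real (non-NaN) max score is expected while scores are tracked; disabling track_scores below makes it NaN again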
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+
+ context.trackScores(false);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertTrue(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testCollapseQuerySearchResults() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("user", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ // Always end up with uneven buckets so collapsing is predictable
+ final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1;
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new NumericDocValuesField("user", i & 1));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ QueryShardContext queryShardContext = mock(QueryShardContext.class);
+ when(queryShardContext.fieldMapper("user")).thenReturn(
+ new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap())
+ );
+
+ TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader));
+ context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext()));
+ context.trackScores(true);
+ context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar"))));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(2);
+ context.trackTotalHitsUpTo(5);
+
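+        // documents alternate between user 0 and user 1, so collapsing on "user" yields exactly two top hits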
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ CollapseTopFieldDocs topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs;
+ assertThat(topDocs.collapseValues.length, equalTo(2));
+ assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0
+ assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs;
+ assertThat(topDocs.collapseValues.length, equalTo(2));
+ assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0
+ assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1
+
+ context.trackScores(false);
+ QueryPhase.executeInternal(context.withCleanQueryResult());
+ assertTrue(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ topDocs = (CollapseTopFieldDocs) context.queryResult().topDocs().topDocs;
+ assertThat(topDocs.collapseValues.length, equalTo(2));
+ assertThat(topDocs.collapseValues[0], equalTo(0L)); // user == 0
+ assertThat(topDocs.collapseValues[1], equalTo(1L)); // user == 1
+
+ reader.close();
+ dir.close();
+ }
+
public void testCancellationDuringPreprocess() throws IOException {
try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig())) {
@@ -982,7 +1111,8 @@ private static ContextIndexSearcher newContextSearcher(IndexReader reader) throw
IndexSearcher.getDefaultSimilarity(),
IndexSearcher.getDefaultQueryCache(),
IndexSearcher.getDefaultQueryCachingPolicy(),
- true
+ true,
+ null
);
}
@@ -992,7 +1122,8 @@ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexRead
IndexSearcher.getDefaultSimilarity(),
IndexSearcher.getDefaultQueryCache(),
IndexSearcher.getDefaultQueryCachingPolicy(),
- true
+ true,
+ null
) {
@Override
@@ -1003,6 +1134,32 @@ public void search(List leaves, Weight weight, Collector coll
};
}
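+    // test helper that always hands out the same TotalHitCountCollector so the final hit count can be asserted on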
+ private static class TestTotalHitCountCollectorManager extends TotalHitCountCollectorManager {
+ private final TotalHitCountCollector collector;
+
+ static TestTotalHitCountCollectorManager create() {
+ return create(null);
+ }
+
+ static TestTotalHitCountCollectorManager create(final Sort sort) {
+ return new TestTotalHitCountCollectorManager(new TotalHitCountCollector(), sort);
+ }
+
+ private TestTotalHitCountCollectorManager(final TotalHitCountCollector collector, final Sort sort) {
+ super(sort);
+ this.collector = collector;
+ }
+
+ @Override
+ public TotalHitCountCollector newCollector() throws IOException {
+ return collector;
+ }
+
+ public int getTotalHits() {
+ return collector.getTotalHits();
+ }
+ }
+
private static class AssertingEarlyTerminationFilterCollector extends FilterCollector {
private final int size;
diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
new file mode 100644
index 0000000000000..dfa41edb5cff2
--- /dev/null
+++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java
@@ -0,0 +1,1158 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.query;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.spans.SpanNearQuery;
+import org.apache.lucene.queries.spans.SpanTermQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.FilterCollector;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TotalHits;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.opensearch.action.search.SearchShardTask;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.ToXContent;
+import org.opensearch.common.xcontent.XContentBuilder;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberType;
+import org.opensearch.index.query.ParsedQuery;
+import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.shard.IndexShardTestCase;
+import org.opensearch.lucene.queries.MinDocQuery;
+import org.opensearch.search.DocValueFormat;
+import org.opensearch.search.collapse.CollapseBuilder;
+import org.opensearch.search.internal.ContextIndexSearcher;
+import org.opensearch.search.internal.ScrollContext;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.search.profile.ProfileResult;
+import org.opensearch.search.profile.ProfileShardResult;
+import org.opensearch.search.profile.SearchProfileShardResults;
+import org.opensearch.search.profile.query.CollectorResult;
+import org.opensearch.search.profile.query.QueryProfileShardResult;
+import org.opensearch.search.sort.SortAndFormats;
+import org.opensearch.test.TestSearchContext;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Consumer;
+
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.hamcrest.Matchers.hasSize;
+
+public class QueryProfilePhaseTests extends IndexShardTestCase {
+
+ private IndexShard indexShard;
+
+ @Override
+ public Settings threadPoolSettings() {
+ return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.search.min_queue_size", 10).build();
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ indexShard = newShard(true);
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ super.tearDown();
+ closeShards(indexShard);
+ }
+
+ public void testPostFilterDisablesCountOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+
+ TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.setSearcher(newContextSearcher(reader));
+ context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery()));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, collector -> {
+ assertThat(collector.getReason(), equalTo("search_post_filter"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchNoDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testTerminateAfterWithFilter() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ for (int i = 0; i < 10; i++) {
+ doc.add(new StringField("foo", Integer.toString(i), Store.NO));
+ }
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.terminateAfter(1);
+ context.setSize(10);
+ for (int i = 0; i < 10; i++) {
+ context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i)))));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, collector -> {
+ assertThat(collector.getReason(), equalTo("search_post_filter"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren().get(0).getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ });
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testMinScoreDisablesCountOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ w.addDocument(doc);
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newEarlyTerminationContextSearcher(reader, 0));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.setSize(0);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.minimumScore(100);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation);
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_min_score"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testInOrderScrollOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ w.addDocument(new Document());
+ }
+ w.close();
+ IndexReader reader = DirectoryReader.open(dir);
+ ScrollContext scrollContext = new ScrollContext();
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext);
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ scrollContext.lastEmittedDoc = null;
+ scrollContext.maxScore = Float.NaN;
+ scrollContext.totalHits = null;
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ int size = randomIntBetween(2, 5);
+ context.setSize(size);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, size));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.terminateAfter(), equalTo(size));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size));
+ assertProfileData(context, "ConstantScoreQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testTerminateAfterEarlyTermination() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "bar", Store.NO));
+ }
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "baz", Store.NO));
+ }
+ doc.add(new NumericDocValuesField("rank", numDocs - i));
+ w.addDocument(doc);
+ }
+ w.close();
+ final IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+
+ context.terminateAfter(1);
+ {
+ context.setSize(1);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ context.setSize(0);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+
+ {
+ context.setSize(1);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+ {
+ context.setSize(1);
+ BooleanQuery bq = new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD)
+ .add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
+ .build();
+ context.parsedQuery(new ParsedQuery(bq));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ context.setSize(0);
+ context.parsedQuery(new ParsedQuery(bq));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertTrue(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0));
+
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), equalTo(0L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_count"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+
+ context.terminateAfter(7);
+ context.setSize(10);
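+ // With terminate_after=7 the reported total hits are 0 when total-hit tracking is disabled (-1) and capped at 7 otherwise.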
+ for (int trackTotalHits : new int[] { -1, 3, 75, 100 }) {
+ context.trackTotalHitsUpTo(trackTotalHits);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertTrue(context.queryResult().terminatedEarly());
+ if (trackTotalHits == -1) {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L));
+ } else {
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(7L));
+ }
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(7));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(7L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_terminate_after_count"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+ }
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testIndexSortingEarlyTermination() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("rank", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "bar", Store.NO));
+ }
+ if (randomBoolean()) {
+ doc.add(new StringField("foo", "baz", Store.NO));
+ }
+ doc.add(new NumericDocValuesField("rank", numDocs - i));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ final IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ context.setSize(1);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
+ assertThat(fieldDoc.fields[0], equalTo(1));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ {
+ context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1)));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ assertProfileData(context, collector -> {
+ assertThat(collector.getReason(), equalTo("search_post_filter"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MinDocQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, (query) -> {
+ assertThat(query.getQueryName(), equalTo("MatchAllDocsQuery"));
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ });
+ context.parsedPostFilter(null);
+ }
+
+ {
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, 1));
+ context.trackTotalHitsUpTo(SearchContext.TRACK_TOTAL_HITS_DISABLED);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1));
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class));
+ assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2)));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+ }
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testIndexSortScrollOptimization() throws Exception {
+ Directory dir = newDirectory();
+ final Sort indexSort = new Sort(new SortField("rank", SortField.Type.INT), new SortField("tiebreaker", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; ++i) {
+ Document doc = new Document();
+ doc.add(new NumericDocValuesField("rank", random().nextInt()));
+ doc.add(new NumericDocValuesField("tiebreaker", i));
+ w.addDocument(doc);
+ }
+ if (randomBoolean()) {
+ w.forceMerge(randomIntBetween(1, 10));
+ }
+ w.close();
+
+ final IndexReader reader = DirectoryReader.open(dir);
+ List<SortAndFormats> searchSortAndFormats = new ArrayList<>();
+ searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }));
+ // search sort is a prefix of the index sort
+ searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[] { DocValueFormat.RAW }));
+ for (SortAndFormats searchSortAndFormat : searchSortAndFormats) {
+ ScrollContext scrollContext = new ScrollContext();
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext);
+ context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
+ scrollContext.lastEmittedDoc = null;
+ scrollContext.maxScore = Float.NaN;
+ scrollContext.totalHits = null;
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(10);
+ context.sort(searchSortAndFormat);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertProfileData(context, "MatchAllDocsQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1;
+ FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1];
+
+ context.setSearcher(newEarlyTerminationContextSearcher(reader, 10));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertNull(context.queryResult().terminatedEarly());
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.terminateAfter(), equalTo(0));
+ assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs));
+ assertProfileData(context, "ConstantScoreQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(1));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("SearchAfterSortedDocQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+ FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0];
+ for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) {
+ @SuppressWarnings("unchecked")
+ FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator(
+ i,
+ false
+ );
+ int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]);
+ if (cmp == 0) {
+ continue;
+ }
+ assertThat(cmp, equalTo(1));
+ break;
+ }
+ }
+ reader.close();
+ dir.close();
+ }
+
+ public void testDisableTopScoreCollection() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(new StandardAnalyzer());
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ Document doc = new Document();
+ final int numDocs = 2 * scaledRandomIntBetween(50, 450);
+ for (int i = 0; i < numDocs; i++) {
+ doc.clear();
+ if (i % 2 == 0) {
+ doc.add(new TextField("title", "foo bar", Store.NO));
+ } else {
+ doc.add(new TextField("title", "foo", Store.NO));
+ }
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ Query q = new SpanNearQuery.Builder("title", true).addClause(new SpanTermQuery(new Term("title", "foo")))
+ .addClause(new SpanTermQuery(new Term("title", "bar")))
+ .build();
+
+ context.parsedQuery(new ParsedQuery(q));
+ context.setSize(3);
+ context.trackTotalHitsUpTo(3);
+ TopDocsCollectorContext topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
+ assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.COMPLETE);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.EQUAL_TO);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
+ assertProfileData(context, "SpanNearQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.sort(new SortAndFormats(new Sort(new SortField("other", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }));
+ topDocsContext = TopDocsCollectorContext.createTopDocsCollectorContext(context, false);
+ assertEquals(topDocsContext.create(null).scoreMode(), org.apache.lucene.search.ScoreMode.TOP_DOCS);
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(numDocs / 2, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(3));
+ assertEquals(context.queryResult().topDocs().topDocs.totalHits.relation, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+ assertProfileData(context, "SpanNearQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testMinScore() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+ for (int i = 0; i < 10; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new StringField("filter", "f1", Store.NO));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+ context.parsedQuery(
+ new ParsedQuery(
+ new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD)
+ .build()
+ )
+ );
+ context.minimumScore(0.01f);
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(1);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertEquals(10, context.queryResult().topDocs().topDocs.totalHits.value);
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), equalTo(10L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_min_score"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), hasSize(1));
+ assertThat(collector.getProfiledChildren().get(0).getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testMaxScore() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("filter", SortField.Type.STRING));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ final int numDocs = scaledRandomIntBetween(600, 900);
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new StringField("filter", "f1" + ((i > 0) ? " " + Integer.toString(i) : ""), Store.NO));
+ doc.add(new SortedDocValuesField("filter", newBytesRef("f1" + ((i > 0) ? " " + Integer.toString(i) : ""))));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader));
+ context.trackScores(true);
+ context.parsedQuery(
+ new ParsedQuery(
+ new BooleanQuery.Builder().add(new TermQuery(new Term("foo", "bar")), Occur.MUST)
+ .add(new TermQuery(new Term("filter", "f1")), Occur.SHOULD)
+ .build()
+ )
+ );
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(1);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(1, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, greaterThanOrEqualTo(6L));
+ assertProfileData(context, "BooleanQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren(), hasSize(2));
+ assertThat(query.getProfiledChildren().get(0).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+
+ assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery"));
+ assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ public void testCollapseQuerySearchResults() throws Exception {
+ Directory dir = newDirectory();
+ final Sort sort = new Sort(new SortField("user", SortField.Type.INT));
+ IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
+ RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
+
+ // Always end up with uneven buckets so collapsing is predictable
+ final int numDocs = 2 * scaledRandomIntBetween(600, 900) - 1;
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("foo", "bar", Store.NO));
+ doc.add(new NumericDocValuesField("user", i & 1));
+ w.addDocument(doc);
+ }
+ w.close();
+
+ IndexReader reader = DirectoryReader.open(dir);
+ QueryShardContext queryShardContext = mock(QueryShardContext.class);
+ when(queryShardContext.fieldMapper("user")).thenReturn(
+ new NumberFieldType("user", NumberType.INTEGER, true, false, true, false, null, Collections.emptyMap())
+ );
+
+ TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, newContextSearcher(reader));
+ context.collapse(new CollapseBuilder("user").build(context.getQueryShardContext()));
+ context.trackScores(true);
+ context.parsedQuery(new ParsedQuery(new TermQuery(new Term("foo", "bar"))));
+ context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()));
+ context.setSize(2);
+ context.trackTotalHitsUpTo(5);
+
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ assertProfileData(context, "TermQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren(), empty());
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ context.sort(new SortAndFormats(sort, new DocValueFormat[] { DocValueFormat.RAW }));
+ QueryPhase.executeInternal(context.withCleanQueryResult().withProfilers());
+ assertFalse(Float.isNaN(context.queryResult().getMaxScore()));
+ assertEquals(2, context.queryResult().topDocs().topDocs.scoreDocs.length);
+ assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
+ assertThat(context.queryResult().topDocs().topDocs, instanceOf(CollapseTopFieldDocs.class));
+
+ assertProfileData(context, "TermQuery", query -> {
+ assertThat(query.getTimeBreakdown().keySet(), not(empty()));
+ assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L));
+ assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L));
+ assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L));
+ assertThat(query.getProfiledChildren(), empty());
+ }, collector -> {
+ assertThat(collector.getReason(), equalTo("search_top_hits"));
+ assertThat(collector.getTime(), greaterThan(0L));
+ assertThat(collector.getProfiledChildren(), empty());
+ });
+
+ reader.close();
+ dir.close();
+ }
+
+ private void assertProfileData(SearchContext context, String type, Consumer<ProfileResult> query, Consumer<CollectorResult> collector)
+ throws IOException {
+ assertProfileData(context, collector, (profileResult) -> {
+ assertThat(profileResult.getQueryName(), equalTo(type));
+ assertThat(profileResult.getTime(), greaterThan(0L));
+ query.accept(profileResult);
+ });
+ }
+
+ private void assertProfileData(SearchContext context, Consumer<CollectorResult> collector, Consumer<ProfileResult> query1)
+ throws IOException {
+ assertProfileData(context, Arrays.asList(query1), collector, false);
+ }
+
+ private void assertProfileData(
+ SearchContext context,
+ Consumer<CollectorResult> collector,
+ Consumer<ProfileResult> query1,
+ Consumer<ProfileResult> query2
+ ) throws IOException {
+ assertProfileData(context, Arrays.asList(query1, query2), collector, false);
+ }
+
+ private final void assertProfileData(
+ SearchContext context,
+ List<Consumer<ProfileResult>> queries,
+ Consumer<CollectorResult> collector,
+ boolean debug
+ ) throws IOException {
+ assertThat(context.getProfilers(), not(nullValue()));
+
+ final ProfileShardResult result = SearchProfileShardResults.buildShardResults(context.getProfilers(), null);
+ if (debug) {
+ final SearchProfileShardResults results = new SearchProfileShardResults(
+ Collections.singletonMap(indexShard.shardId().toString(), result)
+ );
+
+ try (final XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint()) {
+ builder.startObject();
+ results.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.flush();
+
+ final OutputStream out = builder.getOutputStream();
+ assertThat(out, instanceOf(ByteArrayOutputStream.class));
+
+ logger.info(new String(((ByteArrayOutputStream) out).toByteArray(), StandardCharsets.UTF_8));
+ }
+ }
+
+ assertThat(result.getQueryProfileResults(), hasSize(1));
+
+ final QueryProfileShardResult queryProfileShardResult = result.getQueryProfileResults().get(0);
+ assertThat(queryProfileShardResult.getQueryResults(), hasSize(queries.size()));
+
+ for (int i = 0; i < queries.size(); ++i) {
+ queries.get(i).accept(queryProfileShardResult.getQueryResults().get(i));
+ }
+
+ collector.accept(queryProfileShardResult.getCollectorResult());
+ }
+
+ private static ContextIndexSearcher newContextSearcher(IndexReader reader) throws IOException {
+ return new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ IndexSearcher.getDefaultQueryCachingPolicy(),
+ true,
+ null
+ );
+ }
+
+ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexReader reader, int size) throws IOException {
+ return new ContextIndexSearcher(
+ reader,
+ IndexSearcher.getDefaultSimilarity(),
+ IndexSearcher.getDefaultQueryCache(),
+ IndexSearcher.getDefaultQueryCachingPolicy(),
+ true,
+ null
+ ) {
+
+ @Override
+ public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
+ final Collector in = new AssertingEarlyTerminationFilterCollector(collector, size);
+ super.search(leaves, weight, in);
+ }
+ };
+ }
+
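+ // Asserts that the wrapped collector never collects more documents per segment than the configured size.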
+ private static class AssertingEarlyTerminationFilterCollector extends FilterCollector {
+ private final int size;
+
+ AssertingEarlyTerminationFilterCollector(Collector in, int size) {
+ super(in);
+ this.size = size;
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ final LeafCollector in = super.getLeafCollector(context);
+ return new FilterLeafCollector(in) {
+ int collected;
+
+ @Override
+ public void collect(int doc) throws IOException {
+ assert collected <= size : "should not collect more than " + size + " doc per segment, got " + collected;
+ ++collected;
+ super.collect(doc);
+ }
+ };
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
index 38a0253305833..832328cb0242f 100644
--- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java
@@ -334,7 +334,8 @@ public boolean shouldCache(Query query) {
indexSearcher.getSimilarity(),
queryCache,
queryCachingPolicy,
- false
+ false,
+ null
);
SearchContext searchContext = mock(SearchContext.class);
diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java
index 0e91332892a55..0b2235a0afedd 100644
--- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java
+++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java
@@ -32,6 +32,7 @@
package org.opensearch.test;
import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Query;
import org.opensearch.action.OriginalIndices;
@@ -70,6 +71,7 @@
import org.opensearch.search.internal.ShardSearchRequest;
import org.opensearch.search.profile.Profilers;
import org.opensearch.search.query.QuerySearchResult;
+import org.opensearch.search.query.ReduceableSearchResult;
import org.opensearch.search.rescore.RescoreContext;
import org.opensearch.search.sort.SortAndFormats;
import org.opensearch.search.suggest.SuggestionSearchContext;
@@ -90,7 +92,7 @@ public class TestSearchContext extends SearchContext {
final BigArrays bigArrays;
final IndexService indexService;
final BitsetFilterCache fixedBitSetFilterCache;
- final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
+ final Map<Class<?>, CollectorManager<? extends Collector, ReduceableSearchResult>> queryCollectorManagers = new HashMap<>();
final IndexShard indexShard;
final QuerySearchResult queryResult = new QuerySearchResult();
final QueryShardContext queryShardContext;
@@ -110,7 +112,9 @@ public class TestSearchContext extends SearchContext {
private SearchContextAggregations aggregations;
private ScrollContext scrollContext;
private FieldDoc searchAfter;
- private final long originNanoTime = System.nanoTime();
+ private Profilers profilers;
+ private CollapseContext collapse;
+
private final Map<String, SearchExtBuilder> searchExtBuilders = new HashMap<>();
public TestSearchContext(BigArrays bigArrays, IndexService indexService) {
@@ -405,12 +409,13 @@ public FieldDoc searchAfter() {
@Override
public SearchContext collapse(CollapseContext collapse) {
- return null;
+ this.collapse = collapse;
+ return this;
}
@Override
public CollapseContext collapse() {
- return null;
+ return collapse;
}
@Override
@@ -596,12 +601,12 @@ public long getRelativeTimeInMillis() {
@Override
public Profilers getProfilers() {
- return null; // no profiling
+ return profilers;
}
@Override
- public Map<Class<?>, Collector> queryCollectors() {
- return queryCollectors;
+ public Map<Class<?>, CollectorManager<? extends Collector, ReduceableSearchResult>> queryCollectorManagers() {
+ return queryCollectorManagers;
}
@Override
@@ -633,4 +638,21 @@ public void addRescore(RescoreContext rescore) {
public ReaderContext readerContext() {
throw new UnsupportedOperationException();
}
+
+ /**
+ * Clean the query result by consuming all of it
+ */
+ public TestSearchContext withCleanQueryResult() {
+ queryResult.consumeAll();
+ profilers = null;
+ return this;
+ }
+
+ /**
+ * Add profilers to the search context
+ */
+ public TestSearchContext withProfilers() {
+ this.profilers = new Profilers(searcher);
+ return this;
+ }
}
From cc0e66b1dcc2cfe17b76bcea1168ebb996fbb090 Mon Sep 17 00:00:00 2001
From: Owais Kazi
Date: Thu, 24 Mar 2022 19:50:54 -0700
Subject: [PATCH 06/73] Replaced "master" terminology in Log message (#2575)
Changed the log message to cluster-manager from master
Signed-off-by: Owais Kazi
---
.../rest/discovery/Zen2RestApiIT.java | 2 +-
.../UnsafeBootstrapAndDetachCommandIT.java | 20 +++---
.../DedicatedClusterSnapshotRestoreIT.java | 20 +++---
.../AddVotingConfigExclusionsRequest.java | 10 +--
.../cluster/InternalClusterInfoService.java | 10 +--
.../coordination/ClusterBootstrapService.java | 4 +-
.../ClusterFormationFailureHelper.java | 14 ++--
.../cluster/coordination/Coordinator.java | 22 ++++---
.../coordination/DetachClusterCommand.java | 4 +-
.../cluster/coordination/JoinHelper.java | 2 +-
.../cluster/coordination/Reconfigurator.java | 4 +-
.../UnsafeBootstrapMasterCommand.java | 10 +--
.../cluster/node/DiscoveryNodeRole.java | 2 +-
.../cluster/node/DiscoveryNodes.java | 4 +-
.../HandshakingTransportAddressConnector.java | 7 +-
.../org/opensearch/discovery/PeerFinder.java | 6 +-
.../opensearch/env/NodeRepurposeCommand.java | 10 +--
.../opensearch/gateway/GatewayMetaState.java | 6 +-
.../IncrementalClusterStateWriter.java | 3 +-
.../gateway/PersistedClusterStateService.java | 2 +-
.../PersistentTasksClusterService.java | 2 +-
.../repositories/blobstore/package-info.java | 2 +-
...AddVotingConfigExclusionsRequestTests.java | 2 +-
...tAddVotingConfigExclusionsActionTests.java | 4 +-
.../ClusterBootstrapServiceTests.java | 2 +-
.../ClusterFormationFailureHelperTests.java | 64 +++++++++----------
.../coordination/CoordinatorTests.java | 8 ++-
.../discovery/AbstractDisruptionTestCase.java | 2 +-
.../AbstractCoordinatorTestCase.java | 2 +-
.../opensearch/test/InternalTestCluster.java | 7 +-
30 files changed, 135 insertions(+), 122 deletions(-)
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java
index 198cc11d824e7..f7899d91e0cb9 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java
@@ -176,7 +176,7 @@ public void testFailsOnUnknownNode() throws Exception {
assertThat(e.getResponse().getStatusLine().getStatusCode(), is(400));
assertThat(
e.getMessage(),
- Matchers.containsString("add voting config exclusions request for [invalid] matched no master-eligible nodes")
+ Matchers.containsString("add voting config exclusions request for [invalid] matched no cluster-manager-eligible nodes")
);
}
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
index 1447379b93ec8..292469c6e7b79 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java
@@ -287,7 +287,7 @@ public void test3MasterNodes2Failed() throws Exception {
internalCluster().setBootstrapMasterNodeIndex(2);
List<String> masterNodes = new ArrayList<>();
- logger.info("--> start 1st master-eligible node");
+ logger.info("--> start 1st cluster-manager-eligible node");
masterNodes.add(
internalCluster().startMasterOnlyNode(
Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()
@@ -299,7 +299,7 @@ public void test3MasterNodes2Failed() throws Exception {
Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build()
); // node ordinal 1
- logger.info("--> start 2nd and 3rd master-eligible nodes and bootstrap");
+ logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and bootstrap");
masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3
logger.info("--> wait for all nodes to join the cluster");
@@ -335,19 +335,19 @@ public void test3MasterNodes2Failed() throws Exception {
assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
});
- logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held");
+ logger.info("--> try to unsafely bootstrap 1st cluster-manager-eligible node, while node lock is held");
Environment environmentMaster1 = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build()
);
expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);
- logger.info("--> stop 1st master-eligible node and data-only node");
+ logger.info("--> stop 1st cluster-manager-eligible node and data-only node");
NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0)));
assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten());
internalCluster().stopRandomDataNode();
- logger.info("--> unsafely-bootstrap 1st master-eligible node");
+ logger.info("--> unsafely-bootstrap 1st cluster-manager-eligible node");
MockTerminal terminal = unsafeBootstrap(environmentMaster1, false, true);
Metadata metadata = OpenSearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, nodeEnvironment.nodeDataPaths())
.loadBestOnDiskState().metadata;
@@ -363,7 +363,7 @@ public void test3MasterNodes2Failed() throws Exception {
)
);
- logger.info("--> start 1st master-eligible node");
+ logger.info("--> start 1st cluster-manager-eligible node");
String masterNode2 = internalCluster().startMasterOnlyNode(master1DataPathSettings);
logger.info("--> detach-cluster on data-only node");
@@ -399,7 +399,7 @@ public void test3MasterNodes2Failed() throws Exception {
IndexMetadata indexMetadata = clusterService().state().metadata().index("test");
assertThat(indexMetadata.getSettings().get(IndexMetadata.SETTING_HISTORY_UUID), notNullValue());
- logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes");
+ logger.info("--> detach-cluster on 2nd and 3rd cluster-manager-eligible nodes");
Environment environmentMaster2 = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build()
);
@@ -409,7 +409,7 @@ public void test3MasterNodes2Failed() throws Exception {
);
detachCluster(environmentMaster3, false);
- logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster");
+ logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and ensure 4 nodes stable cluster");
bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master2DataPathSettings));
bootstrappedNodes.add(internalCluster().startMasterOnlyNode(master3DataPathSettings));
ensureStableCluster(4);
@@ -422,7 +422,7 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti
Settings settings = Settings.builder().put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), true).build();
- logger.info("--> start mixed data and master-eligible node and bootstrap cluster");
+ logger.info("--> start mixed data and cluster-manager-eligible node and bootstrap cluster");
String masterNode = internalCluster().startNode(settings); // node ordinal 0
logger.info("--> start data-only node and ensure 2 nodes stable cluster");
@@ -457,7 +457,7 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti
);
detachCluster(environment, false);
- logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form");
+ logger.info("--> stop cluster-manager-eligible node, clear its data and start it again - new cluster should form");
internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() {
@Override
public boolean clearData(String nodeName) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 47d57e1260b5f..0c392dbe8bbe6 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -482,8 +482,8 @@ public void testSnapshotWithStuckNode() throws Exception {
try {
assertAcked(deleteSnapshotResponseFuture.actionGet());
} catch (SnapshotMissingException ex) {
- // When master node is closed during this test, it sometime manages to delete the snapshot files before
- // completely stopping. In this case the retried delete snapshot operation on the new master can fail
+ // When cluster-manager node is closed during this test, it sometimes manages to delete the snapshot files before
+ // completely stopping. In this case the retried delete snapshot operation on the new cluster-manager can fail
// with SnapshotMissingException
}
@@ -759,7 +759,7 @@ public void testRegistrationFailure() {
logger.info("--> start first node");
internalCluster().startNode();
logger.info("--> start second node");
- // Make sure the first node is elected as master
+ // Make sure the first node is elected as cluster-manager
internalCluster().startNode(nonMasterNode());
// Register mock repositories
for (int i = 0; i < 5; i++) {
@@ -836,7 +836,7 @@ public void sendResponse(RestResponse response) {
}
public void testMasterShutdownDuringSnapshot() throws Exception {
- logger.info("--> starting two master nodes and two data nodes");
+ logger.info("--> starting two cluster-manager nodes and two data nodes");
internalCluster().startMasterOnlyNodes(2);
internalCluster().startDataOnlyNodes(2);
@@ -859,7 +859,7 @@ public void testMasterShutdownDuringSnapshot() throws Exception {
.setIndices("test-idx")
.get();
- logger.info("--> stopping master node");
+ logger.info("--> stopping cluster-manager node");
internalCluster().stopCurrentMasterNode();
logger.info("--> wait until the snapshot is done");
@@ -874,7 +874,7 @@ public void testMasterShutdownDuringSnapshot() throws Exception {
}
public void testMasterAndDataShutdownDuringSnapshot() throws Exception {
- logger.info("--> starting three master nodes and two data nodes");
+ logger.info("--> starting three cluster-manager nodes and two data nodes");
internalCluster().startMasterOnlyNodes(3);
internalCluster().startDataOnlyNodes(2);
@@ -902,7 +902,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception {
logger.info("--> stopping data node {}", dataNode);
stopNode(dataNode);
- logger.info("--> stopping master node {} ", masterNode);
+ logger.info("--> stopping cluster-manager node {} ", masterNode);
internalCluster().stopCurrentMasterNode();
logger.info("--> wait until the snapshot is done");
@@ -925,7 +925,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception {
* the cluster.
*/
public void testRestoreShrinkIndex() throws Exception {
- logger.info("--> starting a master node and a data node");
+ logger.info("--> starting a cluster-manager node and a data node");
internalCluster().startMasterOnlyNode();
internalCluster().startDataOnlyNode();
@@ -1144,7 +1144,7 @@ public void testDeduplicateIndexMetadata() throws Exception {
}
public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception {
- logger.info("--> starting a master node and two data nodes");
+ logger.info("--> starting a cluster-manager node and two data nodes");
internalCluster().startMasterOnlyNode();
internalCluster().startDataOnlyNodes(2);
final Path repoPath = randomRepoPath();
@@ -1200,7 +1200,7 @@ public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception {
}
public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception {
- logger.info("--> starting a master node and two data nodes");
+ logger.info("--> starting a cluster-manager node and two data nodes");
internalCluster().startMasterOnlyNode();
final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
final Path repoPath = randomRepoPath();
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java
index 99291742145f0..e0e5bf622b99e 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java
@@ -54,7 +54,7 @@
import java.util.stream.StreamSupport;
/**
- * A request to add voting config exclusions for certain master-eligible nodes, and wait for these nodes to be removed from the voting
+ * A request to add voting config exclusions for certain cluster-manager-eligible nodes, and wait for these nodes to be removed from the voting
* configuration.
*/
public class AddVotingConfigExclusionsRequest extends MasterNodeRequest<AddVotingConfigExclusionsRequest> {
@@ -66,7 +66,7 @@ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest resolveVotingConfigExclusions(ClusterState currentSta
if (newVotingConfigExclusions.isEmpty()) {
throw new IllegalArgumentException(
- "add voting config exclusions request for " + Arrays.asList(nodeDescriptions) + " matched no master-eligible nodes"
+ "add voting config exclusions request for "
+ + Arrays.asList(nodeDescriptions)
+ + " matched no cluster-manager-eligible nodes"
);
}
} else if (nodeIds.length >= 1) {
diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
index 05d91fdfd9ebb..5b1c026e5259b 100644
--- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
+++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
@@ -77,7 +77,7 @@
* InternalClusterInfoService provides the ClusterInfoService interface,
* routinely updated on a timer. The timer can be dynamically changed by
* setting the <code>cluster.info.update.interval</code> setting (defaulting
- * to 30 seconds). The InternalClusterInfoService only runs on the master node.
+ * to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node.
* Listens for changes in the number of data nodes and immediately submits a
* ClusterInfoUpdateJob if a node has been added.
*
@@ -109,7 +109,7 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt
private volatile ImmutableOpenMap<String, DiskUsage> leastAvailableSpaceUsages;
private volatile ImmutableOpenMap<String, DiskUsage> mostAvailableSpaceUsages;
private volatile IndicesStatsSummary indicesStatsSummary;
- // null if this node is not currently the master
+ // null if this node is not currently the cluster-manager
private final AtomicReference<RefreshAndRescheduleRunnable> refreshAndRescheduleRunnable = new AtomicReference<>();
private volatile boolean enabled;
private volatile TimeValue fetchTimeout;
@@ -150,8 +150,8 @@ void setUpdateFrequency(TimeValue updateFrequency) {
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.localNodeMaster() && refreshAndRescheduleRunnable.get() == null) {
- logger.trace("elected as master, scheduling cluster info update tasks");
- executeRefresh(event.state(), "became master");
+ logger.trace("elected as cluster-manager, scheduling cluster info update tasks");
+ executeRefresh(event.state(), "became cluster-manager");
final RefreshAndRescheduleRunnable newRunnable = new RefreshAndRescheduleRunnable();
refreshAndRescheduleRunnable.set(newRunnable);
@@ -535,7 +535,7 @@ protected void doRun() {
if (this == refreshAndRescheduleRunnable.get()) {
super.doRun();
} else {
- logger.trace("master changed, scheduled refresh job is stale");
+ logger.trace("cluster-manager changed, scheduled refresh job is stale");
}
}
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java
index ce34a21e4adb6..8df561149eb3d 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterBootstrapService.java
@@ -135,7 +135,7 @@ public ClusterBootstrapService(
+ DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()
+ "] set to ["
+ DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE
- + "] must be master-eligible"
+ + "] must be cluster-manager-eligible"
);
}
bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings));
@@ -219,7 +219,7 @@ void scheduleUnconfiguredBootstrap() {
logger.info(
"no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] "
- + "unless existing master is discovered",
+ + "unless existing cluster-manager is discovered",
unconfiguredBootstrapTimeout
);
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java
index c36a2983a011a..0f419aa7a0937 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java
@@ -192,7 +192,7 @@ String getDescription() {
);
if (clusterState.nodes().getLocalNode().isMasterNode() == false) {
- return String.format(Locale.ROOT, "master not discovered yet: %s", discoveryStateIgnoringQuorum);
+ return String.format(Locale.ROOT, "cluster-manager not discovered yet: %s", discoveryStateIgnoringQuorum);
}
if (clusterState.getLastAcceptedConfiguration().isEmpty()) {
@@ -203,14 +203,14 @@ String getDescription() {
} else {
bootstrappingDescription = String.format(
Locale.ROOT,
- "this node must discover master-eligible nodes %s to bootstrap a cluster",
+ "this node must discover cluster-manager-eligible nodes %s to bootstrap a cluster",
INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(settings)
);
}
return String.format(
Locale.ROOT,
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and %s: %s",
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and %s: %s",
bootstrappingDescription,
discoveryStateIgnoringQuorum
);
@@ -221,7 +221,7 @@ String getDescription() {
if (clusterState.getLastCommittedConfiguration().equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) {
return String.format(
Locale.ROOT,
- "master not discovered yet and this node was detached from its previous cluster, have discovered %s; %s",
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, have discovered %s; %s",
foundPeers,
discoveryWillContinueDescription
);
@@ -250,7 +250,7 @@ String getDescription() {
return String.format(
Locale.ROOT,
- "master not discovered or elected yet, an election requires %s, have discovered %s which %s; %s",
+ "cluster-manager not discovered or elected yet, an election requires %s, have discovered %s which %s; %s",
quorumDescription,
foundPeers,
isQuorumOrNot,
@@ -269,8 +269,8 @@ private String describeQuorum(VotingConfiguration votingConfiguration) {
if (nodeIds.size() == 1) {
if (nodeIds.contains(GatewayMetaState.STALE_STATE_CONFIG_NODE_ID)) {
- return "one or more nodes that have already participated as master-eligible nodes in the cluster but this node was "
- + "not master-eligible the last time it joined the cluster";
+ return "one or more nodes that have already participated as cluster-manager-eligible nodes in the cluster but this node was "
+ + "not cluster-manager-eligible the last time it joined the cluster";
} else {
return "a node with id " + realNodeIds;
}
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
index 557f11f75d969..89e5b9b4cfbcc 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java
@@ -510,7 +510,7 @@ private void startElection() {
private void abdicateTo(DiscoveryNode newMaster) {
assert Thread.holdsLock(mutex);
assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode;
- assert newMaster.isMasterNode() : "should only abdicate to master-eligible node but was " + newMaster;
+ assert newMaster.isMasterNode() : "should only abdicate to cluster-manager-eligible node but was " + newMaster;
final StartJoinRequest startJoinRequest = new StartJoinRequest(newMaster, Math.max(getCurrentTerm(), maxTermSeen) + 1);
logger.info("abdicating to {} with term {}", newMaster, startJoinRequest.getTerm());
getLastAcceptedState().nodes().mastersFirstStream().forEach(node -> {
@@ -563,7 +563,7 @@ private Join joinLeaderInTerm(StartJoinRequest startJoinRequest) {
private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) {
assert Thread.holdsLock(mutex) == false;
- assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not master-eligible";
+ assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not cluster-manager-eligible";
logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest);
if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) {
@@ -683,7 +683,7 @@ void becomeCandidate(String method) {
void becomeLeader(String method) {
assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
assert mode == Mode.CANDIDATE : "expected candidate but was " + mode;
- assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not master-eligible";
+ assert getLocalNode().isMasterNode() : getLocalNode() + " became a leader but is not cluster-manager-eligible";
logger.debug(
"{}: coordinator becoming LEADER in term {} (was {}, lastKnownLeader was [{}])",
@@ -709,7 +709,7 @@ void becomeLeader(String method) {
void becomeFollower(String method, DiscoveryNode leaderNode) {
assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
- assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not master-eligible";
+ assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not cluster-manager-eligible";
assert mode != Mode.LEADER : "do not switch to follower from leader (should be candidate first)";
if (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) {
@@ -751,11 +751,11 @@ void becomeFollower(String method, DiscoveryNode leaderNode) {
}
private void cleanMasterService() {
- masterService.submitStateUpdateTask("clean-up after stepping down as master", new LocalClusterUpdateTask() {
+ masterService.submitStateUpdateTask("clean-up after stepping down as cluster-manager", new LocalClusterUpdateTask() {
@Override
public void onFailure(String source, Exception e) {
// ignore
- logger.trace("failed to clean-up after stepping down as master", e);
+ logger.trace("failed to clean-up after stepping down as cluster-manager", e);
}
@Override
@@ -987,9 +987,9 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura
}
if (getLocalNode().isMasterNode() == false) {
- logger.debug("skip setting initial configuration as local node is not a master-eligible node");
+ logger.debug("skip setting initial configuration as local node is not a cluster-manager-eligible node");
throw new CoordinationStateRejectedException(
- "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"
+ "this node is not cluster-manager-eligible, but cluster bootstrapping can only happen on a cluster-manager-eligible node"
);
}
@@ -1046,8 +1046,10 @@ ClusterState improveConfiguration(ClusterState clusterState) {
// exclude any nodes whose ID is in the voting config exclusions list ...
final Stream<String> excludedNodeIds = clusterState.getVotingConfigExclusions().stream().map(VotingConfigExclusion::getNodeId);
- // ... and also automatically exclude the node IDs of master-ineligible nodes that were previously master-eligible and are still in
- // the voting config. We could exclude all the master-ineligible nodes here, but there could be quite a few of them and that makes
+ // ... and also automatically exclude the node IDs of cluster-manager-ineligible nodes that were previously cluster-manager-eligible
+ // and are still in
+ // the voting config. We could exclude all the cluster-manager-ineligible nodes here, but there could be quite a few of them and
+ // that makes
// the logging much harder to follow.
final Stream<String> masterIneligibleNodeIdsInVotingConfig = StreamSupport.stream(clusterState.nodes().spliterator(), false)
.filter(
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java
index 49d88fd33c724..efa5a5ee600ab 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/DetachClusterCommand.java
@@ -47,9 +47,9 @@ public class DetachClusterCommand extends OpenSearchNodeCommand {
static final String CONFIRMATION_MSG = DELIMITER
+ "\n"
+ "You should only run this tool if you have permanently lost all of the\n"
- + "master-eligible nodes in this cluster and you cannot restore the cluster\n"
+ + "cluster-manager-eligible nodes in this cluster and you cannot restore the cluster\n"
+ "from a snapshot, or you have already unsafely bootstrapped a new cluster\n"
- + "by running `opensearch-node unsafe-bootstrap` on a master-eligible\n"
+ + "by running `opensearch-node unsafe-bootstrap` on a cluster-manager-eligible\n"
+ "node that belonged to the same cluster as this node. This tool can cause\n"
+ "arbitrary data loss and its use should be your last resort.\n"
+ "\n"
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java
index 6d2fb99e04f86..5975e5b64214f 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java
@@ -489,7 +489,7 @@ public void close(Mode newMode) {
pendingAsTasks.put(task, new JoinTaskListener(task, value));
});
- final String stateUpdateSource = "elected-as-master ([" + pendingAsTasks.size() + "] nodes joined)";
+ final String stateUpdateSource = "elected-as-cluster-manager ([" + pendingAsTasks.size() + "] nodes joined)";
pendingAsTasks.put(JoinTaskExecutor.newBecomeMasterTask(), (source, e) -> {});
pendingAsTasks.put(JoinTaskExecutor.newFinishElectionTask(), (source, e) -> {});
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
index 26f289f5547d6..b38b0cf0f4693 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
@@ -57,14 +57,14 @@ public class Reconfigurator {
* the best resilience it makes automatic adjustments to the voting configuration as master nodes join or leave the cluster. Adjustments
* that fix or increase the size of the voting configuration are always a good idea, but the wisdom of reducing the voting configuration
* size is less clear. For instance, automatically reducing the voting configuration down to a single node means the cluster requires
- * this node to operate, which is not resilient: if it broke we could restore every other master-eligible node in the cluster to health
+ * this node to operate, which is not resilient: if it broke we could restore every other cluster-manager-eligible node in the cluster to health
* and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a
* five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the
* loss of a further node before failing.
*
* We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we
* require the user to control the voting configuration manually using the retirement API. The former, default, option, guarantees that
- * as long as there have been at least three master-eligible nodes in the cluster and no more than one of them is currently unavailable,
+ * as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable,
* then the cluster will still operate, which is what almost everyone wants. Manual control is for users who want different guarantees.
*/
public static final Setting<Boolean> CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION = Setting.boolSetting(
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java
index e61b6448f6ac9..c6c7e75497e29 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java
@@ -60,15 +60,15 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand {
static final String CONFIRMATION_MSG = DELIMITER
+ "\n"
+ "You should only run this tool if you have permanently lost half or more\n"
- + "of the master-eligible nodes in this cluster, and you cannot restore the\n"
+ + "of the cluster-manager-eligible nodes in this cluster, and you cannot restore the\n"
+ "cluster from a snapshot. This tool can cause arbitrary data loss and its\n"
- + "use should be your last resort. If you have multiple surviving master\n"
+ + "use should be your last resort. If you have multiple surviving cluster-manager\n"
+ "eligible nodes, you should run this tool on the node with the highest\n"
+ "cluster state (term, version) pair.\n"
+ "\n"
+ "Do you want to proceed?\n";
- static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on master eligible node";
+ static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on cluster-manager eligible node";
static final String EMPTY_LAST_COMMITTED_VOTING_CONFIG_MSG =
"last committed voting voting configuration is empty, cluster has never been bootstrapped?";
@@ -81,7 +81,9 @@ public class UnsafeBootstrapMasterCommand extends OpenSearchNodeCommand {
private OptionSpec<Boolean> applyClusterReadOnlyBlockOption;
UnsafeBootstrapMasterCommand() {
- super("Forces the successful election of the current node after the permanent loss of the half or more master-eligible nodes");
+ super(
+ "Forces the successful election of the current node after the permanent loss of the half or more cluster-manager-eligible nodes"
+ );
applyClusterReadOnlyBlockOption = parser.accepts("apply-cluster-read-only-block", "Optional cluster.blocks.read_only setting")
.withOptionalArg()
.ofType(Boolean.class);
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java
index cff1a77f4cdb7..83e35c0ee18ab 100644
--- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java
+++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java
public Setting<Boolean> legacySetting() {
};
/**
- * Represents the role for a master-eligible node.
+ * Represents the role for a cluster-manager-eligible node.
* @deprecated As of 2.0, because promoting inclusive language, replaced by {@link #CLUSTER_MANAGER_ROLE}
*/
@Deprecated
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java
index 1097f3bc245ac..8d84869bc8bec 100644
--- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java
+++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java
public List<DiscoveryNode> addedNodes() {
public String shortSummary() {
final StringBuilder summary = new StringBuilder();
if (masterNodeChanged()) {
- summary.append("master node changed {previous [");
+ summary.append("cluster-manager node changed {previous [");
if (previousMasterNode() != null) {
summary.append(previousMasterNode());
}
@@ -799,7 +799,7 @@ public boolean isLocalNodeElectedMaster() {
}
/**
- * Check if the given name of the node role is 'cluster_manger' or 'master'.
+ * Check if the given name of the node role is 'cluster_manager' or 'master'.
* The method is added for {@link #resolveNodes} to keep the code clear, when support the both above roles.
* @deprecated As of 2.0, because promoting inclusive language. MASTER_ROLE is deprecated.
* @param matchAttrName a given String for a name of the node role.
diff --git a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java
index 906e28cbb6b51..d2e2c5fbea8ac 100644
--- a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java
+++ b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java
@@ -137,7 +137,9 @@ protected void innerOnResponse(DiscoveryNode remoteNode) {
if (remoteNode.equals(transportService.getLocalNode())) {
listener.onFailure(new ConnectTransportException(remoteNode, "local node found"));
} else if (remoteNode.isMasterNode() == false) {
- listener.onFailure(new ConnectTransportException(remoteNode, "non-master-eligible node found"));
+ listener.onFailure(
+ new ConnectTransportException(remoteNode, "non-cluster-manager-eligible node found")
+ );
} else {
transportService.connectToNode(remoteNode, new ActionListener<Void>() {
@Override
@@ -153,7 +155,8 @@ public void onResponse(Void ignored) {
@Override
public void onFailure(Exception e) {
// we opened a connection and successfully performed a handshake, so we're definitely
- // talking to a master-eligible node with a matching cluster name and a good version,
+ // talking to a cluster-manager-eligible node with a matching cluster name and a good
+ // version,
// but the attempt to open a full connection to its publish address failed; a common
// reason is that the remote node is listening on 0.0.0.0 but has made an inappropriate
// choice for its publish address.
diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java
index 37f07c5d56a9a..fe669e7b6d073 100644
--- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java
+++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java
public List<TransportAddress> getLastResolvedAddresses() {
public interface TransportAddressConnector {
/**
- * Identify the node at the given address and, if it is a master node and not the local node then establish a full connection to it.
+ * Identify the node at the given address and, if it is a cluster-manager node and not the local node then establish a full connection to it.
*/
void connectToRemoteMasterNode(TransportAddress transportAddress, ActionListener<DiscoveryNode> listener);
}
@@ -275,7 +275,7 @@ private boolean handleWakeUp() {
return peersRemoved;
}
- logger.trace("probing master nodes from cluster state: {}", lastAcceptedNodes);
+ logger.trace("probing cluster-manager nodes from cluster state: {}", lastAcceptedNodes);
for (ObjectCursor<DiscoveryNode> discoveryNodeObjectCursor : lastAcceptedNodes.getMasterNodes().values()) {
startProbe(discoveryNodeObjectCursor.value.getAddress());
}
@@ -381,7 +381,7 @@ void establishConnection() {
transportAddressConnector.connectToRemoteMasterNode(transportAddress, new ActionListener<DiscoveryNode>() {
@Override
public void onResponse(DiscoveryNode remoteNode) {
- assert remoteNode.isMasterNode() : remoteNode + " is not master-eligible";
+ assert remoteNode.isMasterNode() : remoteNode + " is not cluster-manager-eligible";
assert remoteNode.equals(getLocalNode()) == false : remoteNode + " is the local node";
synchronized (mutex) {
if (active == false) {
diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java
index d14b7df8b747a..cb431a6a5d0de 100644
--- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java
+++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java
@@ -68,7 +68,7 @@ public class NodeRepurposeCommand extends OpenSearchNodeCommand {
static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found";
public NodeRepurposeCommand() {
- super("Repurpose this node to another master/data role, cleaning up any excess persisted data");
+ super("Repurpose this node to another cluster-manager/data role, cleaning up any excess persisted data");
}
void testExecute(Terminal terminal, OptionSet options, Environment env) throws Exception {
@@ -129,7 +129,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi
terminal.println(noMasterMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetadataPaths.size()));
outputHowToSeeVerboseInformation(terminal);
- terminal.println("Node is being re-purposed as no-master and no-data. Clean-up of index data will be performed.");
+ terminal.println("Node is being re-purposed as no-cluster-manager and no-data. Clean-up of index data will be performed.");
confirm(terminal, "Do you want to proceed?");
removePaths(terminal, indexPaths); // clean-up shard dirs
@@ -137,7 +137,7 @@ private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths, Envi
MetadataStateFormat.deleteMetaState(dataPaths);
IOUtils.rm(Stream.of(dataPaths).map(path -> path.resolve(INDICES_FOLDER)).toArray(Path[]::new));
- terminal.println("Node successfully repurposed to no-master and no-data.");
+ terminal.println("Node successfully repurposed to no-cluster-manager and no-data.");
}
private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Environment env) throws IOException {
@@ -162,12 +162,12 @@ private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths, Enviro
terminal.println(shardMessage(shardDataPaths.size(), indexUUIDs.size()));
outputHowToSeeVerboseInformation(terminal);
- terminal.println("Node is being re-purposed as master and no-data. Clean-up of shard data will be performed.");
+ terminal.println("Node is being re-purposed as cluster-manager and no-data. Clean-up of shard data will be performed.");
confirm(terminal, "Do you want to proceed?");
removePaths(terminal, shardDataPaths); // clean-up shard dirs
- terminal.println("Node successfully repurposed to master and no-data.");
+ terminal.println("Node successfully repurposed to cluster-manager and no-data.");
}
private ClusterState loadClusterState(Terminal terminal, Environment env, PersistedClusterStateService psf) throws IOException {
diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
index fd978a9c8ed8b..3081c4da8f7a7 100644
--- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
+++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java
@@ -89,7 +89,7 @@
*
* When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that
* the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link
- * ClusterState#metadata()} because it might be stale or incomplete. Master-eligible nodes must perform an election to find a complete and
+ * ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and
* non-stale state, and master-ineligible nodes receive the real cluster state from the elected master after joining the cluster.
*/
public class GatewayMetaState implements Closeable {
@@ -97,7 +97,7 @@ public class GatewayMetaState implements Closeable {
/**
* Fake node ID for a voting configuration written by a master-ineligible data node to indicate that its on-disk state is potentially
* stale (since it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is
- * restarted as a master-eligible node then it does not win any elections until it has received a fresh cluster state.
+ * restarted as a cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state.
*/
public static final String STALE_STATE_CONFIG_NODE_ID = "STALE_STATE_CONFIG";
@@ -310,7 +310,7 @@ public void applyClusterState(ClusterChangedEvent event) {
}
try {
- // Hack: This is to ensure that non-master-eligible Zen2 nodes always store a current term
+ // Hack: This is to ensure that non-cluster-manager-eligible Zen2 nodes always store a current term
// that's higher than the last accepted term.
// TODO: can we get rid of this hack?
if (event.state().term() > incrementalClusterStateWriter.getPreviousManifest().getCurrentTerm()) {
diff --git a/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java b/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java
index 4c1a921e9c4ac..4933b70384960 100644
--- a/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java
+++ b/server/src/main/java/org/opensearch/gateway/IncrementalClusterStateWriter.java
@@ -333,7 +333,8 @@ void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteState
} catch (WriteStateException e) {
// If the Manifest write results in a dirty WriteStateException it's not safe to roll back, removing the new metadata files,
// because if the Manifest was actually written to disk and its deletion fails it will reference these new metadata files.
- // On master-eligible nodes a dirty WriteStateException here is fatal to the node since we no longer really have any idea
+ // On cluster-manager-eligible nodes a dirty WriteStateException here is fatal to the node since we no longer really have
+ // any idea
// what the state on disk is and the only sensible response is to start again from scratch.
if (e.isDirty() == false) {
rollback();
diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java
index 8ccf6375239a2..4bcd6bb9fc7a5 100644
--- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java
+++ b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java
@@ -108,7 +108,7 @@
import java.util.function.Supplier;
/**
- * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by master-eligible nodes
+ * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by cluster-manager-eligible nodes
* to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any
* documents that have not changed. The index has the following fields:
*
diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java
index 00a5f335338c4..eaa623b53ac1c 100644
--- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java
+++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java
@@ -398,7 +398,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
/**
* Returns true if the cluster state change(s) require to reassign some persistent tasks. It can happen in the following
- * situations: a node left or is added, the routing table changed, the master node changed, the metadata changed or the
+ * situations: a node left or is added, the routing table changed, the cluster-manager node changed, the metadata changed or the
* persistent tasks changed.
*/
boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) {
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java
index 92ae4b69c45bc..a960cfe70aee7 100644
--- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java
+++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java
@@ -38,7 +38,7 @@
* any {@code BlobStoreRepository} implementation must provide via its implementation of
* {@link org.opensearch.repositories.blobstore.BlobStoreRepository#getBlobContainer()}.
*
- * The blob store is written to and read from by master-eligible nodes and data nodes. All metadata related to a snapshot's
+ *
The blob store is written to and read from by cluster-manager-eligible nodes and data nodes. All metadata related to a snapshot's
* scope and health is written by the master node.
* The data-nodes on the other hand, write the data for each individual shard but do not write any blobs outside of shard directories for
* shards that they hold the primary of. For each shard, the data-node holding the shard's primary writes the actual data in form of
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java
index 8da65ba13b9cb..a92e4e4a6c536 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java
@@ -163,7 +163,7 @@ public void testResolve() {
IllegalArgumentException.class,
() -> makeRequestWithNodeDescriptions("not-a-node").resolveVotingConfigExclusions(clusterState)
).getMessage(),
- equalTo("add voting config exclusions request for [not-a-node] matched no master-eligible nodes")
+ equalTo("add voting config exclusions request for [not-a-node] matched no cluster-manager-eligible nodes")
);
assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
}
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java
index a570db040a805..bff0689a153b3 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java
@@ -344,7 +344,7 @@ public void testReturnsErrorIfNoMatchingNodeDescriptions() throws InterruptedExc
assertThat(rootCause, instanceOf(IllegalArgumentException.class));
assertThat(
rootCause.getMessage(),
- equalTo("add voting config exclusions request for [not-a-node] matched no master-eligible nodes")
+ equalTo("add voting config exclusions request for [not-a-node] matched no cluster-manager-eligible nodes")
);
assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
}
@@ -368,7 +368,7 @@ public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException {
assertThat(rootCause, instanceOf(IllegalArgumentException.class));
assertThat(
rootCause.getMessage(),
- equalTo("add voting config exclusions request for [_all, master:false] matched no master-eligible nodes")
+ equalTo("add voting config exclusions request for [_all, master:false] matched no cluster-manager-eligible nodes")
);
assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
}
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java
index 079b31f31f599..dd55d078fe2c6 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java
@@ -705,7 +705,7 @@ public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() {
IllegalArgumentException.class,
() -> new ClusterBootstrapService(settings.build(), transportService, () -> emptyList(), () -> false, vc -> fail())
).getMessage(),
- containsString("node with [discovery.type] set to [single-node] must be master-eligible")
+ containsString("node with [discovery.type] set to [single-node] must be cluster-manager-eligible")
);
}
}
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java
index 13cdc640008cb..391d7b0e56332 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java
@@ -191,7 +191,7 @@ public void testDescriptionOnMasterIneligibleNodes() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
+ "cluster-manager not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
+ "and [] from last-known cluster state; node term 15, last-accepted version 12 in term 4"
)
);
@@ -208,7 +208,7 @@ public void testDescriptionOnMasterIneligibleNodes() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet: have discovered []; discovery will continue using ["
+ "cluster-manager not discovered yet: have discovered []; discovery will continue using ["
+ otherAddress
+ "] from hosts providers and [] from last-known cluster state; node term 16, last-accepted version 12 in term 4"
)
@@ -226,7 +226,7 @@ public void testDescriptionOnMasterIneligibleNodes() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet: have discovered ["
+ "cluster-manager not discovered yet: have discovered ["
+ otherNode
+ "]; discovery will continue using [] from hosts providers "
+ "and [] from last-known cluster state; node term 17, last-accepted version 12 in term 4"
@@ -257,7 +257,7 @@ public void testDescriptionForBWCState() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
+ "cluster-manager not discovered yet: have discovered []; discovery will continue using [] from hosts providers "
+ "and [] from last-known cluster state; node term 15, last-accepted version 42 in term 0"
)
);
@@ -328,7 +328,7 @@ public void testDescriptionBeforeBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered []; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -348,7 +348,7 @@ public void testDescriptionBeforeBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered []; "
+ "discovery will continue using ["
+ otherAddress
@@ -370,7 +370,7 @@ public void testDescriptionBeforeBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ "[cluster.initial_cluster_manager_nodes] is empty on this node: have discovered ["
+ otherNode
+ "]; "
@@ -391,8 +391,8 @@ public void testDescriptionBeforeBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet, this node has not previously joined a bootstrapped cluster, and "
- + "this node must discover master-eligible nodes [other] to bootstrap a cluster: have discovered []; "
+ "cluster-manager not discovered yet, this node has not previously joined a bootstrapped cluster, and "
+ + "this node must discover cluster-manager-eligible nodes [other] to bootstrap a cluster: have discovered []; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
+ "] from last-known cluster state; node term 4, last-accepted version 7 in term 4"
@@ -442,7 +442,7 @@ public void testDescriptionAfterDetachCluster() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
+ "have discovered []; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -462,7 +462,7 @@ public void testDescriptionAfterDetachCluster() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
+ "have discovered []; "
+ "discovery will continue using ["
+ otherAddress
@@ -484,7 +484,7 @@ public void testDescriptionAfterDetachCluster() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
+ "have discovered ["
+ otherNode
+ "]; "
@@ -506,7 +506,7 @@ public void testDescriptionAfterDetachCluster() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered yet and this node was detached from its previous cluster, "
+ "cluster-manager not discovered yet and this node was detached from its previous cluster, "
+ "have discovered ["
+ yetAnotherNode
+ "]; "
@@ -534,7 +534,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -554,7 +554,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using ["
+ otherAddress
@@ -576,7 +576,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
+ "have discovered ["
+ otherNode
+ "] which is a quorum; "
@@ -598,7 +598,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [otherNode], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [otherNode], "
+ "have discovered ["
+ yetAnotherNode
+ "] which is not a quorum; "
@@ -619,7 +619,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -638,7 +638,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires at least 2 nodes with ids from [n1, n2, n3], "
+ "cluster-manager not discovered or elected yet, an election requires at least 2 nodes with ids from [n1, n2, n3], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -657,7 +657,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -676,7 +676,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
+ "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -695,7 +695,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4, n5], "
+ "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4, n5], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -714,7 +714,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
+ "cluster-manager not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -733,7 +733,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], "
+ "cluster-manager not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -752,7 +752,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [n1], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -771,7 +771,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [n1] and a node with id [n2], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and a node with id [n2], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -790,7 +790,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [n1] and two nodes with ids [n2, n3], "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and two nodes with ids [n2, n3], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -809,7 +809,7 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires a node with id [n1] and "
+ "cluster-manager not discovered or elected yet, an election requires a node with id [n1] and "
+ "at least 2 nodes with ids from [n2, n3, n4], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
@@ -859,7 +859,7 @@ public void testDescriptionAfterBootstrapping() {
// nodes from last-known cluster state could be in either order
is(
oneOf(
- "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
@@ -867,7 +867,7 @@ public void testDescriptionAfterBootstrapping() {
+ otherMasterNode
+ "] from last-known cluster state; node term 0, last-accepted version 0 in term 0",
- "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ otherMasterNode
@@ -889,8 +889,8 @@ public void testDescriptionAfterBootstrapping() {
new StatusInfo(HEALTHY, "healthy-info")
).getDescription(),
is(
- "master not discovered or elected yet, an election requires one or more nodes that have already participated as "
- + "master-eligible nodes in the cluster but this node was not master-eligible the last time it joined the cluster, "
+ "cluster-manager not discovered or elected yet, an election requires one or more nodes that have already participated as "
+ + "cluster-manager-eligible nodes in the cluster but this node was not cluster-manager-eligible the last time it joined the cluster, "
+ "have discovered [] which is not a quorum; "
+ "discovery will continue using [] from hosts providers and ["
+ localNode
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
index 1cdea588564c4..f43d6ff4e6c02 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
@@ -109,7 +109,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
/**
* This test was added to verify that state recovery is properly reset on a node after it has become master and successfully
* recovered a state (see {@link GatewayService}). The situation which triggers this with a decent likelihood is as follows:
- * 3 master-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back
+ * 3 cluster-manager-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back
* one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it.
* Old leader is initiating an election at the same time, and wins election. It becomes leader again, but as it previously
* successfully completed state recovery, is never reset to a state where state recovery can be retried.
@@ -1558,7 +1558,9 @@ public void match(LogEvent event) {
final String message = event.getMessage().getFormattedMessage();
assertThat(
message,
- startsWith("master not discovered or elected yet, an election requires at least 2 nodes with ids from [")
+ startsWith(
+ "cluster-manager not discovered or elected yet, an election requires at least 2 nodes with ids from ["
+ )
);
final List<ClusterNode> matchingNodes = cluster.clusterNodes.stream()
@@ -1729,7 +1731,7 @@ public void testDoesNotPerformElectionWhenRestartingFollower() {
if (cluster.clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()).count() == 2) {
// in the 2-node case, auto-shrinking the voting configuration is required to reduce the voting configuration down to just
- // the leader, otherwise restarting the other master-eligible node triggers an election
+ // the leader, otherwise restarting the other cluster-manager-eligible node triggers an election
leader.submitSetAutoShrinkVotingConfiguration(true);
cluster.stabilise(2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY); // 1st delay for the setting update, 2nd for the reconfiguration
}
diff --git a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
index 5a61300caa89e..e690770b3d0a5 100644
--- a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
+++ b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
@@ -167,7 +167,7 @@ void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBloc
assertBusy(() -> {
ClusterState state = getNodeClusterState(node);
final DiscoveryNodes nodes = state.nodes();
- assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as master", nodes.getMasterNode());
+ assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as cluster-manager", nodes.getMasterNode());
if (expectedBlocks != null) {
for (ClusterBlockLevel level : expectedBlocks.levels()) {
assertTrue(
diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java
index 9841daa5f81b7..6617102c12ffc 100644
--- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java
@@ -321,7 +321,7 @@ class Cluster implements Releasable {
);
logger.info(
- "--> creating cluster of {} nodes (master-eligible nodes: {}) with initial configuration {}",
+ "--> creating cluster of {} nodes (cluster-manager-eligible nodes: {}) with initial configuration {}",
initialNodeCount,
masterEligibleNodeIds,
initialConfiguration
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index 3a28ec2efdd4b..a7c819609c619 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -1121,7 +1121,7 @@ private synchronized void reset(boolean wipeData) throws IOException {
}
assertTrue(
- "expected at least one master-eligible node left in " + nodes,
+ "expected at least one cluster-manager-eligible node left in " + nodes,
nodes.isEmpty() || nodes.values().stream().anyMatch(NodeAndClient::isMasterEligible)
);
@@ -1848,7 +1848,8 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback)
publishNode(nodeAndClient);
if (callback.validateClusterForming() || excludedNodeIds.isEmpty() == false) {
- // we have to validate cluster size to ensure that the restarted node has rejoined the cluster if it was master-eligible;
+ // we have to validate cluster size to ensure that the restarted node has rejoined the cluster if it was
+ // cluster-manager-eligible;
validateClusterFormed();
}
}
@@ -1999,7 +2000,7 @@ public synchronized Set nodesInclude(String index) {
/**
* Performs cluster bootstrap when node with index {@link #bootstrapMasterNodeIndex} is started
- * with the names of all existing and new master-eligible nodes.
+ * with the names of all existing and new cluster-manager-eligible nodes.
* Indexing starts from 0.
* If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing.
*/
From 908682d437ec744395030fe8c3f01973f596de20 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Fri, 25 Mar 2022 14:45:34 -0400
Subject: [PATCH 07/73] Enable merge on refresh and merge on commit on
OpenSearch (#2535)
Enables merge on refresh and merge on commit in OpenSearch by
way of two new index options:
index.merge_on_flush.max_full_flush_merge_wait_time and
index.merge_on_flush.enabled. By default, merge_on_flush is disabled
and the wait time is 10s.
Signed-off-by: Andriy Redko
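As a quick illustration of the two options described above, here is a minimal sketch (not part of the patch) of enabling them when building index settings; the keys come from IndexSettings below, while the 30s value and the surrounding code are made up:

    import org.opensearch.common.settings.Settings;

    // Hedged sketch: enabling merge-on-flush for an index; the 30s value is arbitrary.
    Settings indexSettings = Settings.builder()
        .put("index.merge_on_flush.enabled", true)                          // defaults to false
        .put("index.merge_on_flush.max_full_flush_merge_wait_time", "30s")  // defaults to 10s
        .build();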
---
.../common/settings/IndexScopedSettings.java | 2 +
.../org/opensearch/index/IndexSettings.java | 50 +++++
.../index/engine/InternalEngine.java | 16 ++
.../index/engine/InternalEngineTests.java | 206 ++++++++++++++++++
4 files changed, 274 insertions(+)
diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
index 4c7b3fe25296e..528d6cc9f5e23 100644
--- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
@@ -187,6 +187,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IndexSettings.FINAL_PIPELINE,
MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING,
ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING,
+ IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED,
+ IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME,
// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", (s) -> {
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java
index 45d9a57442049..aa69417af1897 100644
--- a/server/src/main/java/org/opensearch/index/IndexSettings.java
+++ b/server/src/main/java/org/opensearch/index/IndexSettings.java
@@ -503,6 +503,27 @@ public final class IndexSettings {
Setting.Property.IndexScope
);
+ /**
+ * Expert: sets the amount of time to wait for merges (during {@link org.apache.lucene.index.IndexWriter#commit}
+ * or {@link org.apache.lucene.index.IndexWriter#getReader(boolean, boolean)}) returned by MergePolicy.findFullFlushMerges(...).
+ * If this time is reached, we proceed with the commit based on segments merged up to that point. The merges are not
+ * aborted, and will still run to completion independent of the commit or getReader call, like natural segment merges.
+ */
+ public static final Setting<TimeValue> INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME = Setting.timeSetting(
+ "index.merge_on_flush.max_full_flush_merge_wait_time",
+ new TimeValue(10, TimeUnit.SECONDS),
+ new TimeValue(0, TimeUnit.MILLISECONDS),
+ Property.Dynamic,
+ Property.IndexScope
+ );
+
+ public static final Setting<Boolean> INDEX_MERGE_ON_FLUSH_ENABLED = Setting.boolSetting(
+ "index.merge_on_flush.enabled",
+ false,
+ Property.IndexScope,
+ Property.Dynamic
+ );
+
private final Index index;
private final Version version;
private final Logger logger;
@@ -584,6 +605,15 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) {
*/
private volatile int maxRegexLength;
+ /**
+ * The max amount of time to wait for merges
+ */
+ private volatile TimeValue maxFullFlushMergeWaitTime;
+ /**
+ * Is merge on flush enabled or not
+ */
+ private volatile boolean mergeOnFlushEnabled;
+
/**
* Returns the default search fields for this index.
*/
@@ -696,6 +726,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti
mappingTotalFieldsLimit = scopedSettings.get(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING);
mappingDepthLimit = scopedSettings.get(INDEX_MAPPING_DEPTH_LIMIT_SETTING);
mappingFieldNameLengthLimit = scopedSettings.get(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING);
+ maxFullFlushMergeWaitTime = scopedSettings.get(INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME);
+ mergeOnFlushEnabled = scopedSettings.get(INDEX_MERGE_ON_FLUSH_ENABLED);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio);
scopedSettings.addSettingsUpdateConsumer(
@@ -765,6 +797,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING, this::setMappingTotalFieldsLimit);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DEPTH_LIMIT_SETTING, this::setMappingDepthLimit);
scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, this::setMappingFieldNameLengthLimit);
+ scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, this::setMaxFullFlushMergeWaitTime);
+ scopedSettings.addSettingsUpdateConsumer(INDEX_MERGE_ON_FLUSH_ENABLED, this::setMergeOnFlushEnabled);
}
private void setSearchIdleAfter(TimeValue searchIdleAfter) {
@@ -1328,4 +1362,20 @@ public long getMappingFieldNameLengthLimit() {
private void setMappingFieldNameLengthLimit(long value) {
this.mappingFieldNameLengthLimit = value;
}
+
+ private void setMaxFullFlushMergeWaitTime(TimeValue timeValue) {
+ this.maxFullFlushMergeWaitTime = timeValue;
+ }
+
+ private void setMergeOnFlushEnabled(boolean enabled) {
+ this.mergeOnFlushEnabled = enabled;
+ }
+
+ public TimeValue getMaxFullFlushMergeWaitTime() {
+ return this.maxFullFlushMergeWaitTime;
+ }
+
+ public boolean isMergeOnFlushEnabled() {
+ return mergeOnFlushEnabled;
+ }
}
diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
index 84090047d68e8..6bef118e0b61f 100644
--- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
@@ -50,6 +50,7 @@
import org.apache.lucene.index.ShuffleForcedMergePolicy;
import org.apache.lucene.index.SoftDeletesRetentionMergePolicy;
import org.apache.lucene.index.Term;
+import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSetIterator;
@@ -2425,6 +2426,21 @@ private IndexWriterConfig getIndexWriterConfig() {
// to enable it.
mergePolicy = new ShuffleForcedMergePolicy(mergePolicy);
}
+
+ if (config().getIndexSettings().isMergeOnFlushEnabled()) {
+ final long maxFullFlushMergeWaitMillis = config().getIndexSettings().getMaxFullFlushMergeWaitTime().millis();
+ if (maxFullFlushMergeWaitMillis > 0) {
+ iwc.setMaxFullFlushMergeWaitMillis(maxFullFlushMergeWaitMillis);
+ mergePolicy = new MergeOnFlushMergePolicy(mergePolicy);
+ } else {
+ logger.warn(
+ "The {} is enabled but {} is set to 0, merge on flush will not be activated",
+ IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(),
+ IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey()
+ );
+ }
+ }
+
iwc.setMergePolicy(new OpenSearchMergePolicy(mergePolicy));
iwc.setSimilarity(engineConfig.getSimilarity());
iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
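In plain Lucene terms, the wiring added above roughly amounts to the following sketch (illustrative only; the analyzer and base merge policy are placeholders, not what the engine actually uses):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.TieredMergePolicy;
    import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy;

    // Hedged sketch: what the engine change above amounts to at the Lucene level.
    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    iwc.setMaxFullFlushMergeWaitMillis(10_000L);                               // from the wait-time index setting
    iwc.setMergePolicy(new MergeOnFlushMergePolicy(new TieredMergePolicy()));  // wrap the configured merge policy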
diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
index 361013149578e..c33adf3bcb558 100644
--- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
@@ -494,6 +494,212 @@ public void testSegments() throws Exception {
}
}
+ public void testMergeSegmentsOnCommitIsDisabled() throws Exception {
+ final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+ final Settings.Builder settings = Settings.builder()
+ .put(defaultSettings.getSettings())
+ .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(0))
+ .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true);
+ final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
+ final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
+
+ try (
+ Store store = createStore();
+ InternalEngine engine = createEngine(
+ config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get)
+ )
+ ) {
+ assertThat(engine.segments(false), empty());
+ int numDocsFirstSegment = randomIntBetween(5, 50);
+ Set<String> liveDocsFirstSegment = new HashSet<>();
+ for (int i = 0; i < numDocsFirstSegment; i++) {
+ String id = Integer.toString(i);
+ ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null);
+ engine.index(indexForDoc(doc));
+ liveDocsFirstSegment.add(id);
+ }
+ engine.refresh("test");
+ List<Segment> segments = engine.segments(randomBoolean());
+ assertThat(segments, hasSize(1));
+ assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size()));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertFalse(segments.get(0).committed);
+ int deletes = 0;
+ int updates = 0;
+ int appends = 0;
+ int iterations = scaledRandomIntBetween(1, 50);
+ for (int i = 0; i < iterations && liveDocsFirstSegment.isEmpty() == false; i++) {
+ String idToUpdate = randomFrom(liveDocsFirstSegment);
+ liveDocsFirstSegment.remove(idToUpdate);
+ ParsedDocument doc = testParsedDocument(idToUpdate, null, testDocument(), B_1, null);
+ if (randomBoolean()) {
+ engine.delete(new Engine.Delete(doc.id(), newUid(doc), primaryTerm.get()));
+ deletes++;
+ } else {
+ engine.index(indexForDoc(doc));
+ updates++;
+ }
+ if (randomBoolean()) {
+ engine.index(indexForDoc(testParsedDocument(UUIDs.randomBase64UUID(), null, testDocument(), B_1, null)));
+ appends++;
+ }
+ }
+
+ boolean committed = randomBoolean();
+ if (committed) {
+ engine.flush();
+ }
+
+ engine.refresh("test");
+ segments = engine.segments(randomBoolean());
+
+ assertThat(segments, hasSize(2));
+ assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size()));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(updates + deletes));
+ assertThat(segments.get(0).committed, equalTo(committed));
+
+ assertThat(segments.get(1).getNumDocs(), equalTo(updates + appends));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(deletes)); // delete tombstones
+ assertThat(segments.get(1).committed, equalTo(committed));
+ }
+ }
+
+ public void testMergeSegmentsOnCommit() throws Exception {
+ final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+ final Settings.Builder settings = Settings.builder()
+ .put(defaultSettings.getSettings())
+ .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(5000))
+ .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true);
+ final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
+ final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
+
+ try (
+ Store store = createStore();
+ InternalEngine engine = createEngine(
+ config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get)
+ )
+ ) {
+ assertThat(engine.segments(false), empty());
+ int numDocsFirstSegment = randomIntBetween(5, 50);
+ Set<String> liveDocsFirstSegment = new HashSet<>();
+ for (int i = 0; i < numDocsFirstSegment; i++) {
+ String id = Integer.toString(i);
+ ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null);
+ engine.index(indexForDoc(doc));
+ liveDocsFirstSegment.add(id);
+ }
+ engine.refresh("test");
+ List<Segment> segments = engine.segments(randomBoolean());
+ assertThat(segments, hasSize(1));
+ assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size()));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertFalse(segments.get(0).committed);
+ int deletes = 0;
+ int updates = 0;
+ int appends = 0;
+ int iterations = scaledRandomIntBetween(1, 50);
+ for (int i = 0; i < iterations && liveDocsFirstSegment.isEmpty() == false; i++) {
+ String idToUpdate = randomFrom(liveDocsFirstSegment);
+ liveDocsFirstSegment.remove(idToUpdate);
+ ParsedDocument doc = testParsedDocument(idToUpdate, null, testDocument(), B_1, null);
+ if (randomBoolean()) {
+ engine.delete(new Engine.Delete(doc.id(), newUid(doc), primaryTerm.get()));
+ deletes++;
+ } else {
+ engine.index(indexForDoc(doc));
+ updates++;
+ }
+ if (randomBoolean()) {
+ engine.index(indexForDoc(testParsedDocument(UUIDs.randomBase64UUID(), null, testDocument(), B_1, null)));
+ appends++;
+ }
+ }
+
+ boolean committed = randomBoolean();
+ if (committed) {
+ engine.flush();
+ }
+
+ engine.refresh("test");
+ segments = engine.segments(randomBoolean());
+
+ // All segments have to be merged into one
+ assertThat(segments, hasSize(1));
+ assertThat(segments.get(0).getNumDocs(), equalTo(numDocsFirstSegment + appends - deletes));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).committed, equalTo(committed));
+ }
+ }
+
+ // this test writes documents to the engine while concurrently flushing/committing
+ public void testConcurrentMergeSegmentsOnCommit() throws Exception {
+ final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+ final Settings.Builder settings = Settings.builder()
+ .put(defaultSettings.getSettings())
+ .put(IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME.getKey(), TimeValue.timeValueMillis(5000))
+ .put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), true);
+ final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build();
+ final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata);
+
+ try (
+ Store store = createStore();
+ InternalEngine engine = createEngine(
+ config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get)
+ )
+ ) {
+ final int numIndexingThreads = scaledRandomIntBetween(3, 8);
+ final int numDocsPerThread = randomIntBetween(500, 1000);
+ final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1);
+ final List<Thread> indexingThreads = new ArrayList<>();
+ final CountDownLatch doneLatch = new CountDownLatch(numIndexingThreads);
+ // create N indexing threads to index documents simultaneously
+ for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) {
+ final int threadIdx = threadNum;
+ Thread indexingThread = new Thread(() -> {
+ try {
+ barrier.await(); // wait for all threads to start at the same time
+ // index random number of docs
+ for (int i = 0; i < numDocsPerThread; i++) {
+ final String id = "thread" + threadIdx + "#" + i;
+ ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null);
+ engine.index(indexForDoc(doc));
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ doneLatch.countDown();
+ }
+
+ });
+ indexingThreads.add(indexingThread);
+ }
+
+ // start the indexing threads
+ for (Thread thread : indexingThreads) {
+ thread.start();
+ }
+ barrier.await(); // wait for indexing threads to all be ready to start
+ assertThat(doneLatch.await(10, TimeUnit.SECONDS), is(true));
+
+ boolean committed = randomBoolean();
+ if (committed) {
+ engine.flush();
+ }
+
+ engine.refresh("test");
+ List<Segment> segments = engine.segments(randomBoolean());
+
+ // All segments have to be merged into one
+ assertThat(segments, hasSize(1));
+ assertThat(segments.get(0).getNumDocs(), equalTo(numIndexingThreads * numDocsPerThread));
+ assertThat(segments.get(0).committed, equalTo(committed));
+ }
+ }
+
public void testCommitStats() throws IOException {
final AtomicLong maxSeqNo = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final AtomicLong localCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
From d4ce87bddc409beddf81f5ca583ec120c890edda Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Fri, 25 Mar 2022 17:53:16 -0400
Subject: [PATCH 08/73] Fix build-tools/reaper source/target compatibility to
be JDK-11 (#2596)
Signed-off-by: Andriy Redko
---
buildSrc/reaper/build.gradle | 3 +++
1 file changed, 3 insertions(+)
diff --git a/buildSrc/reaper/build.gradle b/buildSrc/reaper/build.gradle
index d5e8d6ebc7099..4ccbec894e30e 100644
--- a/buildSrc/reaper/build.gradle
+++ b/buildSrc/reaper/build.gradle
@@ -11,6 +11,9 @@
apply plugin: 'java'
+targetCompatibility = JavaVersion.VERSION_11
+sourceCompatibility = JavaVersion.VERSION_11
+
jar {
archiveFileName = "${project.name}.jar"
manifest {
From f1d35d028ffc807303c75ba677f4c2cff9835041 Mon Sep 17 00:00:00 2001
From: Owais Kazi
Date: Fri, 25 Mar 2022 16:45:06 -0700
Subject: [PATCH 09/73] Added jenkinsfile to run gradle check in OpenSearch
(#2166)
* Added jenkinsfile for gradle check
Signed-off-by: Owais Kazi
* Added jenkinsfile to run gradle check
Signed-off-by: Owais Kazi
* PR comment
Signed-off-by: Owais Kazi
---
jenkins/jenkinsfile | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
create mode 100644 jenkins/jenkinsfile
diff --git a/jenkins/jenkinsfile b/jenkins/jenkinsfile
new file mode 100644
index 0000000000000..113cb27c4a610
--- /dev/null
+++ b/jenkins/jenkinsfile
@@ -0,0 +1,32 @@
+pipeline {
+ agent {
+ docker {
+ label 'AL2-X64'
+ /* See
+ https://github.com/opensearch-project/opensearch-build/blob/main/docker/ci/dockerfiles/build.ubuntu18.opensearch.x64.dockerfile
+ for docker image
+ */
+ image 'opensearchstaging/ci-runner:ci-runner-ubuntu1804-build-v1'
+ alwaysPull true
+ }
+ }
+
+ environment {
+ JAVA11_HOME="/opt/java/openjdk-11"
+ JAVA14_HOME="/opt/java/openjdk-14"
+ JAVA17_HOME="/opt/java/openjdk-17"
+ JAVA8_HOME="/opt/java/openjdk-8"
+ JAVA_HOME="/opt/java/openjdk-14"
+ }
+
+ stages {
+ stage('gradle-check') {
+ steps {
+ script {
+ sh 'echo gradle check'
+ sh './gradlew check --no-daemon --no-scan'
+ }
+ }
+ }
+ }
+}
From 5dd75bb0aa4cb64c46e99812b5fd9422588067e6 Mon Sep 17 00:00:00 2001
From: Vacha Shah
Date: Mon, 28 Mar 2022 09:32:18 -0700
Subject: [PATCH 10/73] Removing SLM check in tests for OpenSearch versions
(#2604)
Signed-off-by: Vacha Shah
---
.../java/org/opensearch/test/rest/OpenSearchRestTestCase.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
index 27369e79e5dee..9624a9d3d0554 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
@@ -531,7 +531,8 @@ protected boolean waitForAllSnapshotsWiped() {
private void wipeCluster() throws Exception {
// Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping
- if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0)) { // SLM was introduced in version 7.4
+ if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0) && nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced
+ // in version 7.4
if (preserveSLMPoliciesUponCompletion() == false) {
// Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping
deleteAllSLMPolicies();
From 8b997c1d84fcc3ee99dec915982bdd642a124ea3 Mon Sep 17 00:00:00 2001
From: Sruti Parthiban
Date: Mon, 28 Mar 2022 10:00:59 -0700
Subject: [PATCH 11/73] Add resource stats to task framework (#2089)
* Add resource stats to task framework
Signed-off-by: sruti1312
* Update thread resource info and add tests
Signed-off-by: sruti1312
---
.../org/opensearch/client/tasks/TaskInfo.java | 18 +-
.../core/tasks/GetTaskResponseTests.java | 18 +-
.../tasks/CancelTasksResponseTests.java | 3 +-
.../TransportRethrottleActionTests.java | 6 +-
.../admin/cluster/node/tasks/TasksIT.java | 3 +-
.../rest/action/cat/RestTasksAction.java | 2 +
.../org/opensearch/tasks/ResourceStats.java | 28 ++++
.../opensearch/tasks/ResourceStatsType.java | 32 ++++
.../opensearch/tasks/ResourceUsageInfo.java | 108 ++++++++++++
.../opensearch/tasks/ResourceUsageMetric.java | 27 +++
.../main/java/org/opensearch/tasks/Task.java | 155 +++++++++++++++++-
.../java/org/opensearch/tasks/TaskInfo.java | 39 ++++-
.../opensearch/tasks/TaskResourceStats.java | 106 ++++++++++++
.../opensearch/tasks/TaskResourceUsage.java | 105 ++++++++++++
.../opensearch/tasks/ThreadResourceInfo.java | 54 ++++++
.../admin/cluster/node/tasks/TaskTests.java | 84 +++++++++-
.../tasks/CancelTasksResponseTests.java | 2 +-
.../tasks/ListTasksResponseTests.java | 18 +-
.../org/opensearch/tasks/TaskInfoTests.java | 79 +++++++--
19 files changed, 844 insertions(+), 43 deletions(-)
create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceStats.java
create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceStatsType.java
create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java
create mode 100644 server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java
create mode 100644 server/src/main/java/org/opensearch/tasks/TaskResourceStats.java
create mode 100644 server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java
create mode 100644 server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java
index de8374b283ea6..375f004dc3052 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java
@@ -57,6 +57,7 @@ public class TaskInfo {
private TaskId parentTaskId;
private final Map<String, Object> status = new HashMap<>();
private final Map<String, String> headers = new HashMap<>();
+ private final Map<String, Object> resourceStats = new HashMap<>();
public TaskInfo(TaskId taskId) {
this.taskId = taskId;
@@ -150,6 +151,14 @@ public Map getStatus() {
return status;
}
+ void setResourceStats(Map<String, Object> resourceStats) {
+ this.resourceStats.putAll(resourceStats);
+ }
+
+ public Map<String, Object> getResourceStats() {
+ return resourceStats;
+ }
+
private void noOpParse(Object s) {}
public static final ObjectParser.NamedObjectParser PARSER;
@@ -170,6 +179,7 @@ private void noOpParse(Object s) {}
parser.declareBoolean(TaskInfo::setCancelled, new ParseField("cancelled"));
parser.declareString(TaskInfo::setParentTaskId, new ParseField("parent_task_id"));
parser.declareObject(TaskInfo::setHeaders, (p, c) -> p.mapStrings(), new ParseField("headers"));
+ parser.declareObject(TaskInfo::setResourceStats, (p, c) -> p.map(), new ParseField("resource_stats"));
PARSER = (XContentParser p, Void v, String name) -> parser.parse(p, new TaskInfo(new TaskId(name)), null);
}
@@ -188,7 +198,8 @@ && isCancelled() == taskInfo.isCancelled()
&& Objects.equals(getDescription(), taskInfo.getDescription())
&& Objects.equals(getParentTaskId(), taskInfo.getParentTaskId())
&& Objects.equals(status, taskInfo.status)
- && Objects.equals(getHeaders(), taskInfo.getHeaders());
+ && Objects.equals(getHeaders(), taskInfo.getHeaders())
+ && Objects.equals(getResourceStats(), taskInfo.getResourceStats());
}
@Override
@@ -204,7 +215,8 @@ public int hashCode() {
isCancelled(),
getParentTaskId(),
status,
- getHeaders()
+ getHeaders(),
+ getResourceStats()
);
}
@@ -236,6 +248,8 @@ public String toString() {
+ status
+ ", headers="
+ headers
+ + ", resource_stats="
+ + resourceStats
+ '}';
}
}
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java
index 403e295303784..07ee0bedd4777 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java
@@ -38,6 +38,8 @@
import org.opensearch.common.xcontent.ToXContent;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.tasks.RawTaskStatus;
+import org.opensearch.tasks.TaskResourceStats;
+import org.opensearch.tasks.TaskResourceUsage;
import org.opensearch.tasks.Task;
import org.opensearch.tasks.TaskId;
import org.opensearch.tasks.TaskInfo;
@@ -45,6 +47,7 @@
import java.io.IOException;
import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
import static org.opensearch.test.AbstractXContentTestCase.xContentTester;
@@ -57,7 +60,7 @@ public void testFromXContent() throws IOException {
)
.assertEqualsConsumer(this::assertEqualInstances)
.assertToXContentEquivalence(true)
- .randomFieldsExcludeFilter(field -> field.endsWith("headers") || field.endsWith("status"))
+ .randomFieldsExcludeFilter(field -> field.endsWith("headers") || field.endsWith("status") || field.contains("resource_stats"))
.test();
}
@@ -106,7 +109,8 @@ static TaskInfo randomTaskInfo() {
cancellable,
cancelled,
parentTaskId,
- headers
+ headers,
+ randomResourceStats()
);
}
@@ -127,4 +131,14 @@ private static RawTaskStatus randomRawTaskStatus() {
throw new IllegalStateException(e);
}
}
+
+ private static TaskResourceStats randomResourceStats() {
+ return randomBoolean() ? null : new TaskResourceStats(new HashMap<>() {
+ {
+ for (int i = 0; i < randomInt(5); i++) {
+ put(randomAlphaOfLength(5), new TaskResourceUsage(randomNonNegativeLong(), randomNonNegativeLong()));
+ }
+ }
+ });
+ }
}
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java
index 552a3712eea40..26be36b7162f6 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java
@@ -96,7 +96,8 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns
cancellable,
cancelled,
new TaskId("node1", randomLong()),
- Collections.singletonMap("x-header-of", "some-value")
+ Collections.singletonMap("x-header-of", "some-value"),
+ null
)
);
}
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java
index a9e1a59b7e443..6456aa0af9aac 100644
--- a/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java
+++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java
@@ -131,7 +131,8 @@ public void testRethrottleSuccessfulResponse() {
true,
false,
new TaskId("test", task.getId()),
- Collections.emptyMap()
+ Collections.emptyMap(),
+ null
)
);
sliceStatuses.add(new BulkByScrollTask.StatusOrException(status));
@@ -167,7 +168,8 @@ public void testRethrottleWithSomeSucceeded() {
true,
false,
new TaskId("test", task.getId()),
- Collections.emptyMap()
+ Collections.emptyMap(),
+ null
)
);
sliceStatuses.add(new BulkByScrollTask.StatusOrException(status));
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java
index fbac2f7dbff6e..ac0ae44eb732e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java
@@ -907,7 +907,8 @@ public void testNodeNotFoundButTaskFound() throws Exception {
false,
false,
TaskId.EMPTY_TASK_ID,
- Collections.emptyMap()
+ Collections.emptyMap(),
+ null
),
new RuntimeException("test")
),
diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java
index b87205593ce87..a6624c2f8cfdc 100644
--- a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java
@@ -137,6 +137,7 @@ protected Table getTableWithHeader(final RestRequest request) {
// Task detailed info
if (detailed) {
table.addCell("description", "default:true;alias:desc;desc:task action");
+ table.addCell("resource_stats", "default:false;desc:resource consumption info of the task");
}
table.endHeaders();
return table;
@@ -173,6 +174,7 @@ private void buildRow(Table table, boolean fullId, boolean detailed, DiscoveryNo
if (detailed) {
table.addCell(taskInfo.getDescription());
+ table.addCell(taskInfo.getResourceStats());
}
table.endRow();
}
diff --git a/server/src/main/java/org/opensearch/tasks/ResourceStats.java b/server/src/main/java/org/opensearch/tasks/ResourceStats.java
new file mode 100644
index 0000000000000..aab103ad08dcf
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/ResourceStats.java
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+/**
+ * Different resource stats are defined.
+ */
+public enum ResourceStats {
+ CPU("cpu_time_in_nanos"),
+ MEMORY("memory_in_bytes");
+
+ private final String statsName;
+
+ ResourceStats(String statsName) {
+ this.statsName = statsName;
+ }
+
+ @Override
+ public String toString() {
+ return statsName;
+ }
+}
diff --git a/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java b/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java
new file mode 100644
index 0000000000000..c670ac5ba689c
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+/** Defines the different types of resource stats. */
+public enum ResourceStatsType {
+ // resource stats of the worker thread reported directly from runnable.
+ WORKER_STATS("worker_stats", false);
+
+ private final String statsType;
+ private final boolean onlyForAnalysis;
+
+ ResourceStatsType(String statsType, boolean onlyForAnalysis) {
+ this.statsType = statsType;
+ this.onlyForAnalysis = onlyForAnalysis;
+ }
+
+ public boolean isOnlyForAnalysis() {
+ return onlyForAnalysis;
+ }
+
+ @Override
+ public String toString() {
+ return statsType;
+ }
+}
diff --git a/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java b/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java
new file mode 100644
index 0000000000000..ae58f712b63c2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java
@@ -0,0 +1,108 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Thread resource usage information for a particular resource stats type.
+ *
+ * It captures resource usage information, such as memory and CPU, for a particular execution of a thread
+ * for a specific stats type.
+ */
+public class ResourceUsageInfo {
+ private static final Logger logger = LogManager.getLogger(ResourceUsageInfo.class);
+ private final EnumMap<ResourceStats, ResourceStatsInfo> statsInfo = new EnumMap<>(ResourceStats.class);
+
+ public ResourceUsageInfo(ResourceUsageMetric... resourceUsageMetrics) {
+ for (ResourceUsageMetric resourceUsageMetric : resourceUsageMetrics) {
+ this.statsInfo.put(resourceUsageMetric.getStats(), new ResourceStatsInfo(resourceUsageMetric.getValue()));
+ }
+ }
+
+ public void recordResourceUsageMetrics(ResourceUsageMetric... resourceUsageMetrics) {
+ for (ResourceUsageMetric resourceUsageMetric : resourceUsageMetrics) {
+ final ResourceStatsInfo resourceStatsInfo = statsInfo.get(resourceUsageMetric.getStats());
+ if (resourceStatsInfo != null) {
+ updateResourceUsageInfo(resourceStatsInfo, resourceUsageMetric);
+ } else {
+ throw new IllegalStateException(
+ "cannot update ["
+ + resourceUsageMetric.getStats().toString()
+ + "] entry as its not present current_stats_info:"
+ + statsInfo
+ );
+ }
+ }
+ }
+
+ private void updateResourceUsageInfo(ResourceStatsInfo resourceStatsInfo, ResourceUsageMetric resourceUsageMetric) {
+ long currentEndValue;
+ long newEndValue;
+ do {
+ currentEndValue = resourceStatsInfo.endValue.get();
+ newEndValue = resourceUsageMetric.getValue();
+ if (currentEndValue > newEndValue) {
+ logger.debug(
+ "dropping resource usage update as the new value is lower than current value ["
+ + "resource_stats=[{}], "
+ + "current_end_value={}, "
+ + "new_end_value={}]",
+ resourceUsageMetric.getStats(),
+ currentEndValue,
+ newEndValue
+ );
+ return;
+ }
+ } while (!resourceStatsInfo.endValue.compareAndSet(currentEndValue, newEndValue));
+ logger.debug(
+ "updated resource usage info [resource_stats=[{}], " + "old_end_value={}, new_end_value={}]",
+ resourceUsageMetric.getStats(),
+ currentEndValue,
+ newEndValue
+ );
+ }
+
+ public Map<ResourceStats, ResourceStatsInfo> getStatsInfo() {
+ return Collections.unmodifiableMap(statsInfo);
+ }
+
+ @Override
+ public String toString() {
+ return statsInfo.toString();
+ }
+
+ /**
+ * Defines resource stats information.
+ */
+ static class ResourceStatsInfo {
+ private final long startValue;
+ private final AtomicLong endValue;
+
+ private ResourceStatsInfo(long startValue) {
+ this.startValue = startValue;
+ this.endValue = new AtomicLong(startValue);
+ }
+
+ public long getTotalValue() {
+ return endValue.get() - startValue;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(getTotalValue());
+ }
+ }
+}
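A small usage sketch for the class above (illustrative; the metric values are made up, and it assumes package-local access since ResourceStatsInfo is package-private):

    // Hedged sketch from within the org.opensearch.tasks package; values are placeholders.
    ResourceUsageInfo usage = new ResourceUsageInfo(
        new ResourceUsageMetric(ResourceStats.CPU, 1_000L),      // starting CPU snapshot (nanos)
        new ResourceUsageMetric(ResourceStats.MEMORY, 4_096L)    // starting memory snapshot (bytes)
    );
    usage.recordResourceUsageMetrics(new ResourceUsageMetric(ResourceStats.CPU, 6_000L)); // newer snapshot
    long cpuNanos = usage.getStatsInfo().get(ResourceStats.CPU).getTotalValue();          // 6_000 - 1_000 = 5_000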
diff --git a/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java b/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java
new file mode 100644
index 0000000000000..0d13ffe6ec01a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+public class ResourceUsageMetric {
+ private final ResourceStats stats;
+ private final long value;
+
+ public ResourceUsageMetric(ResourceStats stats, long value) {
+ this.stats = stats;
+ this.value = value;
+ }
+
+ public ResourceStats getStats() {
+ return stats;
+ }
+
+ public long getValue() {
+ return value;
+ }
+}
diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java
index ad9d5c3f04411..62453d08724ce 100644
--- a/server/src/main/java/org/opensearch/tasks/Task.java
+++ b/server/src/main/java/org/opensearch/tasks/Task.java
@@ -32,6 +32,8 @@
package org.opensearch.tasks;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.opensearch.action.ActionResponse;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.io.stream.NamedWriteable;
@@ -39,18 +41,27 @@
import org.opensearch.common.xcontent.ToXContentObject;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
/**
* Current task information
*/
public class Task {
+ private static final Logger logger = LogManager.getLogger(Task.class);
+
/**
* The request header to mark tasks with specific ids
*/
public static final String X_OPAQUE_ID = "X-Opaque-Id";
+ private static final String TOTAL = "total";
+
private final long id;
private final String type;
@@ -63,6 +74,8 @@ public class Task {
private final Map<String, String> headers;
+ private final Map<Long, List<ThreadResourceInfo>> resourceStats;
+
/**
* The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style).
*/
@@ -74,7 +87,7 @@ public class Task {
private final long startTimeNanos;
public Task(long id, String type, String action, String description, TaskId parentTask, Map<String, String> headers) {
- this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime(), headers);
+ this(id, type, action, description, parentTask, System.currentTimeMillis(), System.nanoTime(), headers, new ConcurrentHashMap<>());
}
public Task(
@@ -85,7 +98,8 @@ public Task(
TaskId parentTask,
long startTime,
long startTimeNanos,
- Map<String, String> headers
+ Map<String, String> headers,
+ ConcurrentHashMap<Long, List<ThreadResourceInfo>> resourceStats
) {
this.id = id;
this.type = type;
@@ -95,6 +109,7 @@ public Task(
this.startTime = startTime;
this.startTimeNanos = startTimeNanos;
this.headers = headers;
+ this.resourceStats = resourceStats;
}
/**
@@ -108,19 +123,48 @@ public Task(
* generate data?
*/
public final TaskInfo taskInfo(String localNodeId, boolean detailed) {
+ return taskInfo(localNodeId, detailed, detailed == false);
+ }
+
+ /**
+ * Build a version of the task status you can throw over the wire and back
+ * with the option to include resource stats or not.
+ * This method is only used when creating a TaskResult, to avoid storing resource information into the task index.
+ *
+ * @param excludeStats whether resource stats should be excluded from the returned info.
+ * By default, the detailed flag controls whether resource information is included,
+ * but resource stats are excluded here because the task index enforces strict mapping, which breaks when this field is added.
+ * In the future, task-index-mapping.json can be modified to add resource stats.
+ */
+ private TaskInfo taskInfo(String localNodeId, boolean detailed, boolean excludeStats) {
String description = null;
Task.Status status = null;
+ TaskResourceStats resourceStats = null;
if (detailed) {
description = getDescription();
status = getStatus();
}
- return taskInfo(localNodeId, description, status);
+ if (excludeStats == false) {
+ resourceStats = new TaskResourceStats(new HashMap<>() {
+ {
+ put(TOTAL, getTotalResourceStats());
+ }
+ });
+ }
+ return taskInfo(localNodeId, description, status, resourceStats);
}
/**
- * Build a proper {@link TaskInfo} for this task.
+ * Build a {@link TaskInfo} for this task without resource stats.
*/
protected final TaskInfo taskInfo(String localNodeId, String description, Status status) {
+ return taskInfo(localNodeId, description, status, null);
+ }
+
+ /**
+ * Build a proper {@link TaskInfo} for this task.
+ */
+ protected final TaskInfo taskInfo(String localNodeId, String description, Status status, TaskResourceStats resourceStats) {
return new TaskInfo(
new TaskId(localNodeId, getId()),
getType(),
@@ -132,7 +176,8 @@ protected final TaskInfo taskInfo(String localNodeId, String description, Status
this instanceof CancellableTask,
this instanceof CancellableTask && ((CancellableTask) this).isCancelled(),
parentTask,
- headers
+ headers,
+ resourceStats
);
}
@@ -195,6 +240,102 @@ public Status getStatus() {
return null;
}
+ /**
+ * Returns thread level resource consumption of the task
+ */
+ public Map<Long, List<ThreadResourceInfo>> getResourceStats() {
+ return Collections.unmodifiableMap(resourceStats);
+ }
+
+ /**
+ * Returns current total resource usage of the task.
+ * Currently, this method is only called on demand, during get and listing of tasks.
+ * In the future, these values can be cached as an optimization.
+ */
+ public TaskResourceUsage getTotalResourceStats() {
+ return new TaskResourceUsage(getTotalResourceUtilization(ResourceStats.CPU), getTotalResourceUtilization(ResourceStats.MEMORY));
+ }
+
+ /**
+ * Returns total resource consumption for a specific task stat.
+ */
+ public long getTotalResourceUtilization(ResourceStats stats) {
+ long totalResourceConsumption = 0L;
+ for (List<ThreadResourceInfo> threadResourceInfosList : resourceStats.values()) {
+ for (ThreadResourceInfo threadResourceInfo : threadResourceInfosList) {
+ final ResourceUsageInfo.ResourceStatsInfo statsInfo = threadResourceInfo.getResourceUsageInfo().getStatsInfo().get(stats);
+ if (threadResourceInfo.getStatsType().isOnlyForAnalysis() == false && statsInfo != null) {
+ totalResourceConsumption += statsInfo.getTotalValue();
+ }
+ }
+ }
+ return totalResourceConsumption;
+ }
+
+ /**
+ * Adds thread's starting resource consumption information
+ * @param threadId ID of the thread
+ * @param statsType stats type
+ * @param resourceUsageMetrics resource consumption metrics of the thread
+ * @throws IllegalStateException if a matching active thread entry was found, which is not expected.
+ */
+ public void startThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) {
+ final List<ThreadResourceInfo> threadResourceInfoList = resourceStats.computeIfAbsent(threadId, k -> new ArrayList<>());
+ // active thread entry should not be present in the list
+ for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) {
+ if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) {
+ throw new IllegalStateException(
+ "unexpected active thread resource entry present [" + threadId + "]:[" + threadResourceInfo + "]"
+ );
+ }
+ }
+ threadResourceInfoList.add(new ThreadResourceInfo(statsType, resourceUsageMetrics));
+ }
+
+ /**
+ * This method is used to update the resource consumption stats so that the data isn't too stale for long-running tasks.
+ * If active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception.
+ * @param threadId ID of the thread
+ * @param statsType stats type
+ * @param resourceUsageMetrics resource consumption metrics of the thread
+ * @throws IllegalStateException if no matching active thread entry was found.
+ */
+ public void updateThreadResourceStats(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) {
+ final List<ThreadResourceInfo> threadResourceInfoList = resourceStats.get(threadId);
+ if (threadResourceInfoList != null) {
+ for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) {
+ // the active entry present in the list is updated
+ if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) {
+ threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics);
+ return;
+ }
+ }
+ }
+ throw new IllegalStateException("cannot update if active thread resource entry is not present");
+ }
+
+ /**
+ * Record the thread's final resource consumption values.
+ * If active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception.
+ * @param threadId ID of the thread
+ * @param statsType stats type
+ * @param resourceUsageMetrics resource consumption metrics of the thread
+ * @throws IllegalStateException if no matching active thread entry was found.
+ */
+ public void stopThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) {
+ final List<ThreadResourceInfo> threadResourceInfoList = resourceStats.get(threadId);
+ if (threadResourceInfoList != null) {
+ for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) {
+ if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) {
+ threadResourceInfo.setActive(false);
+ threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics);
+ return;
+ }
+ }
+ }
+ throw new IllegalStateException("cannot update final values if active thread resource entry is not present");
+ }
+
/**
* Report of the internal status of a task. These can vary wildly from task
* to task because each task is implemented differently but we should try
@@ -217,12 +358,12 @@ public String getHeader(String header) {
}
public TaskResult result(DiscoveryNode node, Exception error) throws IOException {
- return new TaskResult(taskInfo(node.getId(), true), error);
+ return new TaskResult(taskInfo(node.getId(), true, true), error);
}
public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOException {
if (response instanceof ToXContent) {
- return new TaskResult(taskInfo(node.getId(), true), (ToXContent) response);
+ return new TaskResult(taskInfo(node.getId(), true, true), (ToXContent) response);
} else {
throw new IllegalStateException("response has to implement ToXContent to be able to store the results");
}
diff --git a/server/src/main/java/org/opensearch/tasks/TaskInfo.java b/server/src/main/java/org/opensearch/tasks/TaskInfo.java
index cf77eaf540ee6..e6ba94a71b61d 100644
--- a/server/src/main/java/org/opensearch/tasks/TaskInfo.java
+++ b/server/src/main/java/org/opensearch/tasks/TaskInfo.java
@@ -86,6 +86,8 @@ public final class TaskInfo implements Writeable, ToXContentFragment {
private final Map<String, String> headers;
+ private final TaskResourceStats resourceStats;
+
public TaskInfo(
TaskId taskId,
String type,
@@ -97,7 +99,8 @@ public TaskInfo(
boolean cancellable,
boolean cancelled,
TaskId parentTaskId,
- Map<String, String> headers
+ Map<String, String> headers,
+ TaskResourceStats resourceStats
) {
if (cancellable == false && cancelled == true) {
throw new IllegalArgumentException("task cannot be cancelled");
@@ -113,11 +116,13 @@ public TaskInfo(
this.cancelled = cancelled;
this.parentTaskId = parentTaskId;
this.headers = headers;
+ this.resourceStats = resourceStats;
}
/**
* Read from a stream.
*/
+ @SuppressWarnings("unchecked")
public TaskInfo(StreamInput in) throws IOException {
taskId = TaskId.readFromStream(in);
type = in.readString();
@@ -137,6 +142,11 @@ public TaskInfo(StreamInput in) throws IOException {
}
parentTaskId = TaskId.readFromStream(in);
headers = in.readMap(StreamInput::readString, StreamInput::readString);
+ if (in.getVersion().onOrAfter(Version.V_2_0_0)) {
+ resourceStats = in.readOptionalWriteable(TaskResourceStats::new);
+ } else {
+ resourceStats = null;
+ }
}
@Override
@@ -154,6 +164,9 @@ public void writeTo(StreamOutput out) throws IOException {
}
parentTaskId.writeTo(out);
out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
+ if (out.getVersion().onOrAfter(Version.V_2_0_0)) {
+ out.writeOptionalWriteable(resourceStats);
+ }
}
public TaskId getTaskId() {
@@ -226,6 +239,13 @@ public Map getHeaders() {
return headers;
}
+ /**
+ * Returns the task resource information
+ */
+ public TaskResourceStats getResourceStats() {
+ return resourceStats;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("node", taskId.getNodeId());
@@ -253,6 +273,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.field(attribute.getKey(), attribute.getValue());
}
builder.endObject();
+ if (resourceStats != null) {
+ builder.startObject("resource_stats");
+ resourceStats.toXContent(builder, params);
+ builder.endObject();
+ }
return builder;
}
@@ -278,6 +303,8 @@ public static TaskInfo fromXContent(XContentParser parser) {
// This might happen if we are reading an old version of task info
headers = Collections.emptyMap();
}
+ @SuppressWarnings("unchecked")
+ TaskResourceStats resourceStats = (TaskResourceStats) a[i++];
RawTaskStatus status = statusBytes == null ? null : new RawTaskStatus(statusBytes);
TaskId parentTaskId = parentTaskIdString == null ? TaskId.EMPTY_TASK_ID : new TaskId(parentTaskIdString);
return new TaskInfo(
@@ -291,7 +318,8 @@ public static TaskInfo fromXContent(XContentParser parser) {
cancellable,
cancelled,
parentTaskId,
- headers
+ headers,
+ resourceStats
);
});
static {
@@ -309,6 +337,7 @@ public static TaskInfo fromXContent(XContentParser parser) {
PARSER.declareBoolean(optionalConstructorArg(), new ParseField("cancelled"));
PARSER.declareString(optionalConstructorArg(), new ParseField("parent_task_id"));
PARSER.declareObject(optionalConstructorArg(), (p, c) -> p.mapStrings(), new ParseField("headers"));
+ PARSER.declareObject(optionalConstructorArg(), (p, c) -> TaskResourceStats.fromXContent(p), new ParseField("resource_stats"));
}
@Override
@@ -333,7 +362,8 @@ public boolean equals(Object obj) {
&& Objects.equals(cancellable, other.cancellable)
&& Objects.equals(cancelled, other.cancelled)
&& Objects.equals(status, other.status)
- && Objects.equals(headers, other.headers);
+ && Objects.equals(headers, other.headers)
+ && Objects.equals(resourceStats, other.resourceStats);
}
@Override
@@ -349,7 +379,8 @@ public int hashCode() {
cancellable,
cancelled,
status,
- headers
+ headers,
+ resourceStats
);
}
}
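For orientation, a minimal sketch (not part of the patch) of what constructing a TaskInfo looks like after this change, with the resource stats as the new final constructor argument. All literal values, the "total" stats-type key, and the class name are illustrative only; the constructor parameter order is taken from the hunks above.

import org.opensearch.tasks.TaskId;
import org.opensearch.tasks.TaskInfo;
import org.opensearch.tasks.TaskResourceStats;
import org.opensearch.tasks.TaskResourceUsage;

import java.util.Collections;

public class TaskInfoWithResourceStatsSketch {
    public static void main(String[] args) {
        TaskInfo info = new TaskInfo(
            new TaskId("node1", 1),                          // taskId
            "transport",                                     // type
            "indices:data/read/search",                      // action
            "sample search task",                            // description
            null,                                            // status (optional)
            System.currentTimeMillis(),                      // startTime
            42_000L,                                         // runningTimeNanos
            true,                                            // cancellable
            false,                                           // cancelled
            TaskId.EMPTY_TASK_ID,                            // parentTaskId
            Collections.singletonMap("X-Opaque-Id", "demo"), // headers
            new TaskResourceStats(                           // resourceStats (new argument)
                Collections.singletonMap("total", new TaskResourceUsage(150_000L, 4_096L))
            )
        );
        System.out.println(info);
    }
}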
diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java b/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java
new file mode 100644
index 0000000000000..c35e08ebb34ec
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java
@@ -0,0 +1,106 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+import org.opensearch.common.Strings;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.common.io.stream.Writeable;
+import org.opensearch.common.xcontent.ToXContentFragment;
+import org.opensearch.common.xcontent.XContentBuilder;
+import org.opensearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Resource information about a currently running task.
+ *
+ * Writeable TaskResourceStats objects are used to represent resource
+ * snapshot information about a currently running task.
+ */
+public class TaskResourceStats implements Writeable, ToXContentFragment {
+ private final Map<String, TaskResourceUsage> resourceUsage;
+
+ public TaskResourceStats(Map<String, TaskResourceUsage> resourceUsage) {
+ this.resourceUsage = Objects.requireNonNull(resourceUsage, "resource usage is required");
+ }
+
+ /**
+ * Read from a stream.
+ */
+ public TaskResourceStats(StreamInput in) throws IOException {
+ resourceUsage = in.readMap(StreamInput::readString, TaskResourceUsage::readFromStream);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeMap(resourceUsage, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream));
+ }
+
+ public Map<String, TaskResourceUsage> getResourceUsageInfo() {
+ return resourceUsage;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ for (Map.Entry<String, TaskResourceUsage> resourceUsageEntry : resourceUsage.entrySet()) {
+ builder.startObject(resourceUsageEntry.getKey());
+ if (resourceUsageEntry.getValue() != null) {
+ resourceUsageEntry.getValue().toXContent(builder, params);
+ }
+ builder.endObject();
+ }
+ return builder;
+ }
+
+ public static TaskResourceStats fromXContent(XContentParser parser) throws IOException {
+ XContentParser.Token token = parser.currentToken();
+ if (token == null) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ }
+ final Map<String, TaskResourceUsage> resourceStats = new HashMap<>();
+ if (token == XContentParser.Token.FIELD_NAME) {
+ assert parser.currentToken() == XContentParser.Token.FIELD_NAME : "Expected field name but saw [" + parser.currentToken() + "]";
+ do {
+ // Must point to field name
+ String fieldName = parser.currentName();
+ // And then the value
+ TaskResourceUsage value = TaskResourceUsage.fromXContent(parser);
+ resourceStats.put(fieldName, value);
+ } while (parser.nextToken() == XContentParser.Token.FIELD_NAME);
+ }
+ return new TaskResourceStats(resourceStats);
+ }
+
+ @Override
+ public String toString() {
+ return Strings.toString(this, true, true);
+ }
+
+ // Implements equals and hashcode for testing
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || obj.getClass() != TaskResourceStats.class) {
+ return false;
+ }
+ TaskResourceStats other = (TaskResourceStats) obj;
+ return Objects.equals(resourceUsage, other.resourceUsage);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(resourceUsage);
+ }
+}
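To show how the new fragment renders, here is a minimal usage sketch (not part of the patch). It assumes the OpenSearch XContent helpers used elsewhere in this series; the "total" stats-type key, the numbers, and the class name are illustrative only. The wrapping under a "resource_stats" object mirrors what TaskInfo.toXContent does above.

import org.opensearch.common.Strings;
import org.opensearch.common.xcontent.ToXContent;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.tasks.TaskResourceStats;
import org.opensearch.tasks.TaskResourceUsage;

import java.util.Collections;

public class TaskResourceStatsSketch {
    public static void main(String[] args) throws Exception {
        // "total" is an illustrative stats-type key; values are arbitrary nanos/bytes.
        TaskResourceStats stats = new TaskResourceStats(
            Collections.singletonMap("total", new TaskResourceUsage(150_000L, 4_096L))
        );

        // TaskResourceStats is a ToXContentFragment, so wrap it in an enclosing object
        // the same way TaskInfo.toXContent nests it under "resource_stats".
        XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
        builder.startObject();
        builder.startObject("resource_stats");
        stats.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        builder.endObject();

        System.out.println(Strings.toString(builder));
    }
}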
diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java b/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java
new file mode 100644
index 0000000000000..6af3de2b78c06
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java
@@ -0,0 +1,105 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+import org.opensearch.common.ParseField;
+import org.opensearch.common.Strings;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.common.io.stream.Writeable;
+import org.opensearch.common.xcontent.ConstructingObjectParser;
+import org.opensearch.common.xcontent.ToXContentFragment;
+import org.opensearch.common.xcontent.XContentBuilder;
+import org.opensearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Task resource usage information
+ *
+ * Writeable TaskResourceUsage objects are used to represent resource usage
+ * information of running tasks.
+ */
+public class TaskResourceUsage implements Writeable, ToXContentFragment {
+ private static final ParseField CPU_TIME_IN_NANOS = new ParseField("cpu_time_in_nanos");
+ private static final ParseField MEMORY_IN_BYTES = new ParseField("memory_in_bytes");
+
+ private final long cpuTimeInNanos;
+ private final long memoryInBytes;
+
+ public TaskResourceUsage(long cpuTimeInNanos, long memoryInBytes) {
+ this.cpuTimeInNanos = cpuTimeInNanos;
+ this.memoryInBytes = memoryInBytes;
+ }
+
+ /**
+ * Read from a stream.
+ */
+ public static TaskResourceUsage readFromStream(StreamInput in) throws IOException {
+ return new TaskResourceUsage(in.readVLong(), in.readVLong());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(cpuTimeInNanos);
+ out.writeVLong(memoryInBytes);
+ }
+
+ public long getCpuTimeInNanos() {
+ return cpuTimeInNanos;
+ }
+
+ public long getMemoryInBytes() {
+ return memoryInBytes;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(CPU_TIME_IN_NANOS.getPreferredName(), cpuTimeInNanos);
+ builder.field(MEMORY_IN_BYTES.getPreferredName(), memoryInBytes);
+ return builder;
+ }
+
+ public static final ConstructingObjectParser<TaskResourceUsage, Void> PARSER = new ConstructingObjectParser<>(
+ "task_resource_usage",
+ a -> new TaskResourceUsage((Long) a[0], (Long) a[1])
+ );
+
+ static {
+ PARSER.declareLong(constructorArg(), CPU_TIME_IN_NANOS);
+ PARSER.declareLong(constructorArg(), MEMORY_IN_BYTES);
+ }
+
+ public static TaskResourceUsage fromXContent(XContentParser parser) {
+ return PARSER.apply(parser, null);
+ }
+
+ @Override
+ public String toString() {
+ return Strings.toString(this, true, true);
+ }
+
+ // Implements equals and hashcode for testing
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || obj.getClass() != TaskResourceUsage.class) {
+ return false;
+ }
+ TaskResourceUsage other = (TaskResourceUsage) obj;
+ return Objects.equals(cpuTimeInNanos, other.cpuTimeInNanos) && Objects.equals(memoryInBytes, other.memoryInBytes);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(cpuTimeInNanos, memoryInBytes);
+ }
+}
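Since this class only writes two VLongs, a short round-trip sketch (not part of the patch) illustrates the wire format; it assumes the standard OpenSearch BytesStreamOutput test pattern, and the values and class name are illustrative only.

import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.tasks.TaskResourceUsage;

public class TaskResourceUsageRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Arbitrary sample values: 500 microseconds of CPU and 2 KiB of memory.
        TaskResourceUsage original = new TaskResourceUsage(500_000L, 2_048L);

        // Serialize the way the transport layer would (both fields as VLongs).
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);

            // Read it back and confirm the values survive the round trip.
            try (StreamInput in = out.bytes().streamInput()) {
                TaskResourceUsage copy = TaskResourceUsage.readFromStream(in);
                System.out.println(copy.equals(original)); // true
            }
        }
    }
}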
diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java
new file mode 100644
index 0000000000000..8b45c38c8fb63
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java
@@ -0,0 +1,54 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.tasks;
+
+/**
+ * Resource consumption information about a particular execution of a thread.
+ *
+ * It captures the resource usage information for a particular execution of a thread
+ * for a specific stats type such as worker_stats or response_stats.
+ */
+public class ThreadResourceInfo {
+ private volatile boolean isActive = true;
+ private final ResourceStatsType statsType;
+ private final ResourceUsageInfo resourceUsageInfo;
+
+ public ThreadResourceInfo(ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) {
+ this.statsType = statsType;
+ this.resourceUsageInfo = new ResourceUsageInfo(resourceUsageMetrics);
+ }
+
+ /**
+ * Updates thread's resource consumption information.
+ */
+ public void recordResourceUsageMetrics(ResourceUsageMetric... resourceUsageMetrics) {
+ resourceUsageInfo.recordResourceUsageMetrics(resourceUsageMetrics);
+ }
+
+ public void setActive(boolean isActive) {
+ this.isActive = isActive;
+ }
+
+ public boolean isActive() {
+ return isActive;
+ }
+
+ public ResourceStatsType getStatsType() {
+ return statsType;
+ }
+
+ public ResourceUsageInfo getResourceUsageInfo() {
+ return resourceUsageInfo;
+ }
+
+ @Override
+ public String toString() {
+ return resourceUsageInfo + ", stats_type=" + statsType + ", is_active=" + isActive;
+ }
+}
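A minimal lifecycle sketch of this holder (not part of the patch): ResourceStats, ResourceStatsType, ResourceUsageMetric, and ResourceUsageInfo are introduced elsewhere in this series (they are imported by the tests below), and the numbers are arbitrary. Note that, per the tests, only stats supplied at start may be updated later.

import org.opensearch.tasks.ResourceStats;
import org.opensearch.tasks.ResourceStatsType;
import org.opensearch.tasks.ResourceUsageMetric;
import org.opensearch.tasks.ThreadResourceInfo;

public class ThreadResourceInfoSketch {
    public static void main(String[] args) {
        // Initial snapshot taken when the thread starts working on the task.
        ThreadResourceInfo info = new ThreadResourceInfo(
            ResourceStatsType.WORKER_STATS,
            new ResourceUsageMetric(ResourceStats.CPU, 1_000L),
            new ResourceUsageMetric(ResourceStats.MEMORY, 512L)
        );

        // Later snapshot for the same stats; stats not recorded at start cannot be added here.
        info.recordResourceUsageMetrics(
            new ResourceUsageMetric(ResourceStats.CPU, 25_000L),
            new ResourceUsageMetric(ResourceStats.MEMORY, 4_096L)
        );

        // Mark the entry inactive once the thread finishes its slice of work.
        info.setActive(false);
        System.out.println(info);
    }
}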
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java
index 5f8d5992c9f2f..45db94577f15f 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java
@@ -31,16 +31,23 @@
package org.opensearch.action.admin.cluster.node.tasks;
+import org.opensearch.action.search.SearchAction;
import org.opensearch.common.bytes.BytesArray;
import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.tasks.Task;
import org.opensearch.tasks.TaskId;
import org.opensearch.tasks.TaskInfo;
+import org.opensearch.tasks.ResourceUsageMetric;
+import org.opensearch.tasks.ResourceStats;
+import org.opensearch.tasks.ResourceStatsType;
import org.opensearch.test.OpenSearchTestCase;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Map;
+import static org.opensearch.tasks.TaskInfoTests.randomResourceStats;
+
public class TaskTests extends OpenSearchTestCase {
public void testTaskInfoToString() {
@@ -61,7 +68,8 @@ public void testTaskInfoToString() {
cancellable,
cancelled,
TaskId.EMPTY_TASK_ID,
- Collections.singletonMap("foo", "bar")
+ Collections.singletonMap("foo", "bar"),
+ randomResourceStats(randomBoolean())
);
String taskInfoString = taskInfo.toString();
Map<String, Object> map = XContentHelper.convertToMap(new BytesArray(taskInfoString.getBytes(StandardCharsets.UTF_8)), true).v2();
@@ -94,7 +102,8 @@ public void testCancellableOptionWhenCancelledTrue() {
cancellable,
cancelled,
TaskId.EMPTY_TASK_ID,
- Collections.singletonMap("foo", "bar")
+ Collections.singletonMap("foo", "bar"),
+ randomResourceStats(randomBoolean())
);
String taskInfoString = taskInfo.toString();
Map<String, Object> map = XContentHelper.convertToMap(new BytesArray(taskInfoString.getBytes(StandardCharsets.UTF_8)), true).v2();
@@ -120,7 +129,8 @@ public void testCancellableOptionWhenCancelledFalse() {
cancellable,
cancelled,
TaskId.EMPTY_TASK_ID,
- Collections.singletonMap("foo", "bar")
+ Collections.singletonMap("foo", "bar"),
+ randomResourceStats(randomBoolean())
);
String taskInfoString = taskInfo.toString();
Map<String, Object> map = XContentHelper.convertToMap(new BytesArray(taskInfoString.getBytes(StandardCharsets.UTF_8)), true).v2();
@@ -148,9 +158,75 @@ public void testNonCancellableOption() {
cancellable,
cancelled,
TaskId.EMPTY_TASK_ID,
- Collections.singletonMap("foo", "bar")
+ Collections.singletonMap("foo", "bar"),
+ randomResourceStats(randomBoolean())
)
);
assertEquals(e.getMessage(), "task cannot be cancelled");
}
+
+ public void testTaskResourceStats() {
+ final Task task = new Task(
+ randomLong(),
+ "transport",
+ SearchAction.NAME,
+ "description",
+ new TaskId(randomLong() + ":" + randomLong()),
+ Collections.emptyMap()
+ );
+
+ long totalMemory = 0L;
+ long totalCPU = 0L;
+
+ // reporting resource consumption events and checking total consumption values
+ for (int i = 0; i < randomInt(10); i++) {
+ long initial_memory = randomLongBetween(1, 100);
+ long initial_cpu = randomLongBetween(1, 100);
+
+ ResourceUsageMetric[] initialTaskResourceMetrics = new ResourceUsageMetric[] {
+ new ResourceUsageMetric(ResourceStats.MEMORY, initial_memory),
+ new ResourceUsageMetric(ResourceStats.CPU, initial_cpu) };
+ task.startThreadResourceTracking(i, ResourceStatsType.WORKER_STATS, initialTaskResourceMetrics);
+
+ long memory = initial_memory + randomLongBetween(1, 10000);
+ long cpu = initial_cpu + randomLongBetween(1, 10000);
+
+ totalMemory += memory - initial_memory;
+ totalCPU += cpu - initial_cpu;
+
+ ResourceUsageMetric[] taskResourceMetrics = new ResourceUsageMetric[] {
+ new ResourceUsageMetric(ResourceStats.MEMORY, memory),
+ new ResourceUsageMetric(ResourceStats.CPU, cpu) };
+ task.updateThreadResourceStats(i, ResourceStatsType.WORKER_STATS, taskResourceMetrics);
+ task.stopThreadResourceTracking(i, ResourceStatsType.WORKER_STATS);
+ }
+ assertEquals(task.getTotalResourceStats().getMemoryInBytes(), totalMemory);
+ assertEquals(task.getTotalResourceStats().getCpuTimeInNanos(), totalCPU);
+
+ // updating should throw an IllegalStateException when active entry is not present.
+ try {
+ task.updateThreadResourceStats(randomInt(), ResourceStatsType.WORKER_STATS);
+ fail("update should not be successful as active entry is not present!");
+ } catch (IllegalStateException e) {
+ // pass
+ }
+
+ // re-adding a thread entry that is already present, should throw an exception
+ int threadId = randomInt();
+ task.startThreadResourceTracking(threadId, ResourceStatsType.WORKER_STATS, new ResourceUsageMetric(ResourceStats.MEMORY, 100));
+ try {
+ task.startThreadResourceTracking(threadId, ResourceStatsType.WORKER_STATS);
+ fail("add/start should not be successful as active entry is already present!");
+ } catch (IllegalStateException e) {
+ // pass
+ }
+
+ // existing active entry is present only for memory, update cannot be called with cpu values.
+ try {
+ task.updateThreadResourceStats(threadId, ResourceStatsType.WORKER_STATS, new ResourceUsageMetric(ResourceStats.CPU, 200));
+ fail("update should not be successful as entry for CPU is not present!");
+ } catch (IllegalStateException e) {
+ // pass
+ }
+ }
}
diff --git a/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java b/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java
index 64d2979c2c5a0..c0ec4ca3d31fd 100644
--- a/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java
+++ b/server/src/test/java/org/opensearch/tasks/CancelTasksResponseTests.java
@@ -62,7 +62,7 @@ protected CancelTasksResponse createTestInstance() {
private static List<TaskInfo> randomTasks() {
List<TaskInfo> randomTasks = new ArrayList<>();
for (int i = 0; i < randomInt(10); i++) {
- randomTasks.add(TaskInfoTests.randomTaskInfo());
+ randomTasks.add(TaskInfoTests.randomTaskInfo(false));
}
return randomTasks;
}
diff --git a/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java
index 4d5feb46de1d0..0201509d03a2b 100644
--- a/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java
+++ b/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java
@@ -45,6 +45,7 @@
import java.net.ConnectException;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.function.Predicate;
import java.util.function.Supplier;
@@ -72,7 +73,12 @@ public void testNonEmptyToString() {
true,
false,
new TaskId("node1", 0),
- Collections.singletonMap("foo", "bar")
+ Collections.singletonMap("foo", "bar"),
+ new TaskResourceStats(new HashMap<String, TaskResourceUsage>() {
+ {
+ put("dummy-type1", new TaskResourceUsage(100, 100));
+ }
+ })
);
ListTasksResponse tasksResponse = new ListTasksResponse(singletonList(info), emptyList(), emptyList());
assertEquals(
@@ -93,6 +99,12 @@ public void testNonEmptyToString() {
+ " \"parent_task_id\" : \"node1:0\",\n"
+ " \"headers\" : {\n"
+ " \"foo\" : \"bar\"\n"
+ + " },\n"
+ + " \"resource_stats\" : {\n"
+ + " \"dummy-type1\" : {\n"
+ + " \"cpu_time_in_nanos\" : 100,\n"
+ + " \"memory_in_bytes\" : 100\n"
+ + " }\n"
+ " }\n"
+ " }\n"
+ " ]\n"
@@ -127,8 +139,8 @@ protected boolean supportsUnknownFields() {
@Override
protected Predicate getRandomFieldsExcludeFilter() {
- // status and headers hold arbitrary content, we can't inject random fields in them
- return field -> field.endsWith("status") || field.endsWith("headers");
+ // status, headers and resource_stats hold arbitrary content, we can't inject random fields in them
+ return field -> field.endsWith("status") || field.endsWith("headers") || field.contains("resource_stats");
}
@Override
diff --git a/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java b/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java
index 89b690d81a4ea..7c8cb3230659b 100644
--- a/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java
+++ b/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java
@@ -77,13 +77,13 @@ protected boolean supportsUnknownFields() {
@Override
protected Predicate getRandomFieldsExcludeFilter() {
- // status and headers hold arbitrary content, we can't inject random fields in them
- return field -> "status".equals(field) || "headers".equals(field);
+ // status, headers and resource_stats hold arbitrary content, we can't inject random fields in them
+ return field -> "status".equals(field) || "headers".equals(field) || field.contains("resource_stats");
}
@Override
protected TaskInfo mutateInstance(TaskInfo info) {
- switch (between(0, 9)) {
+ switch (between(0, 10)) {
case 0:
TaskId taskId = new TaskId(info.getTaskId().getNodeId() + randomAlphaOfLength(5), info.getTaskId().getId());
return new TaskInfo(
@@ -97,7 +97,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 1:
return new TaskInfo(
@@ -111,7 +112,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 2:
return new TaskInfo(
@@ -125,7 +127,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 3:
return new TaskInfo(
@@ -139,7 +142,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 4:
Task.Status newStatus = randomValueOtherThan(info.getStatus(), TaskInfoTests::randomRawTaskStatus);
@@ -154,7 +158,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 5:
return new TaskInfo(
@@ -168,7 +173,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 6:
return new TaskInfo(
@@ -182,7 +188,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 7:
return new TaskInfo(
@@ -196,7 +203,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable() == false,
false,
info.getParentTaskId(),
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 8:
TaskId parentId = new TaskId(info.getParentTaskId().getNodeId() + randomAlphaOfLength(5), info.getParentTaskId().getId());
@@ -211,7 +219,8 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
parentId,
- info.getHeaders()
+ info.getHeaders(),
+ info.getResourceStats()
);
case 9:
Map<String, String> headers = info.getHeaders();
@@ -232,7 +241,30 @@ protected TaskInfo mutateInstance(TaskInfo info) {
info.isCancellable(),
info.isCancelled(),
info.getParentTaskId(),
- headers
+ headers,
+ info.getResourceStats()
+ );
+ case 10:
+ Map<String, TaskResourceUsage> resourceUsageMap;
+ if (info.getResourceStats() == null) {
+ resourceUsageMap = new HashMap<>(1);
+ } else {
+ resourceUsageMap = new HashMap<>(info.getResourceStats().getResourceUsageInfo());
+ }
+ resourceUsageMap.put(randomAlphaOfLength(5), new TaskResourceUsage(randomNonNegativeLong(), randomNonNegativeLong()));
+ return new TaskInfo(
+ info.getTaskId(),
+ info.getType(),
+ info.getAction(),
+ info.getDescription(),
+ info.getStatus(),
+ info.getStartTime(),
+ info.getRunningTimeNanos(),
+ info.isCancellable(),
+ info.isCancelled(),
+ info.getParentTaskId(),
+ info.getHeaders(),
+ new TaskResourceStats(resourceUsageMap)
);
default:
throw new IllegalStateException();
@@ -240,11 +272,15 @@ protected TaskInfo mutateInstance(TaskInfo info) {
}
static TaskInfo randomTaskInfo() {
+ return randomTaskInfo(randomBoolean());
+ }
+
+ static TaskInfo randomTaskInfo(boolean detailed) {
TaskId taskId = randomTaskId();
String type = randomAlphaOfLength(5);
String action = randomAlphaOfLength(5);
- Task.Status status = randomBoolean() ? randomRawTaskStatus() : null;
- String description = randomBoolean() ? randomAlphaOfLength(5) : null;
+ Task.Status status = detailed ? randomRawTaskStatus() : null;
+ String description = detailed ? randomAlphaOfLength(5) : null;
long startTime = randomLong();
long runningTimeNanos = randomLong();
boolean cancellable = randomBoolean();
@@ -264,7 +300,8 @@ static TaskInfo randomTaskInfo() {
cancellable,
cancelled,
parentTaskId,
- headers
+ headers,
+ randomResourceStats(detailed)
);
}
@@ -285,4 +322,14 @@ private static RawTaskStatus randomRawTaskStatus() {
throw new IllegalStateException(e);
}
}
+
+ public static TaskResourceStats randomResourceStats(boolean detailed) {
+ return detailed ? new TaskResourceStats(new HashMap<String, TaskResourceUsage>() {
+ {
+ for (int i = 0; i < randomInt(5); i++) {
+ put(randomAlphaOfLength(5), new TaskResourceUsage(randomNonNegativeLong(), randomNonNegativeLong()));
+ }
+ }
+ }) : null;
+ }
}
From dd79352baa5d3ca95c5349c72a6f89142f2b6c8e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 28 Mar 2022 12:40:23 -0500
Subject: [PATCH 12/73] Bump json-schema-validator from 1.0.67 to 1.0.68 in
/buildSrc (#2610)
Bumps [json-schema-validator](https://github.com/networknt/json-schema-validator) from 1.0.67 to 1.0.68.
- [Release notes](https://github.com/networknt/json-schema-validator/releases)
- [Changelog](https://github.com/networknt/json-schema-validator/blob/master/CHANGELOG.md)
- [Commits](https://github.com/networknt/json-schema-validator/compare/1.0.67...1.0.68)
---
updated-dependencies:
- dependency-name: com.networknt:json-schema-validator
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
buildSrc/build.gradle | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index f940eec593306..6134e7cd0250e 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -115,7 +115,7 @@ dependencies {
api 'de.thetaphi:forbiddenapis:3.2'
api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.12'
api 'org.apache.maven:maven-model:3.6.2'
- api 'com.networknt:json-schema-validator:1.0.67'
+ api 'com.networknt:json-schema-validator:1.0.68'
api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson')}"
testFixturesApi "junit:junit:${props.getProperty('junit')}"
From e44706e500b375396bb9e8450909d0a052ff6589 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 28 Mar 2022 12:42:31 -0500
Subject: [PATCH 13/73] Bump jettison from 1.1 to 1.4.1 in
/plugins/discovery-azure-classic (#2614)
* Bump jettison from 1.1 to 1.4.1 in /plugins/discovery-azure-classic
Bumps [jettison](https://github.com/jettison-json/jettison) from 1.1 to 1.4.1.
- [Release notes](https://github.com/jettison-json/jettison/releases)
- [Commits](https://github.com/jettison-json/jettison/compare/jettison-1.1...jettison-1.4.1)
---
updated-dependencies:
- dependency-name: org.codehaus.jettison:jettison
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
* Updating SHAs
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
plugins/discovery-azure-classic/build.gradle | 2 +-
plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 | 1 -
.../discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 | 1 +
3 files changed, 2 insertions(+), 2 deletions(-)
delete mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1
create mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1
diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle
index 28cbc647ac31a..575b8858b16ba 100644
--- a/plugins/discovery-azure-classic/build.gradle
+++ b/plugins/discovery-azure-classic/build.gradle
@@ -59,7 +59,7 @@ dependencies {
api "com.sun.jersey:jersey-client:${versions.jersey}"
api "com.sun.jersey:jersey-core:${versions.jersey}"
api "com.sun.jersey:jersey-json:${versions.jersey}"
- api 'org.codehaus.jettison:jettison:1.1'
+ api 'org.codehaus.jettison:jettison:1.4.1'
api 'com.sun.xml.bind:jaxb-impl:2.2.3-1'
// HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here,
diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1
deleted file mode 100644
index 53133f3b018e6..0000000000000
--- a/plugins/discovery-azure-classic/licenses/jettison-1.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1a01a2a1218fcf9faa2cc2a6ced025bdea687262
diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1
new file mode 100644
index 0000000000000..815d87d917f2e
--- /dev/null
+++ b/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1
@@ -0,0 +1 @@
+8d16bbcbac93446942c9e5da04530159afbe3e65
\ No newline at end of file
From 0216ab24356a2bd6f01b1debd6327942b35575ad Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 28 Mar 2022 12:44:43 -0500
Subject: [PATCH 14/73] Bump google-oauth-client from 1.31.0 to 1.33.1 in
/plugins/repository-gcs (#2616)
* Bump google-oauth-client in /plugins/repository-gcs
Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.31.0 to 1.33.1.
- [Release notes](https://github.com/googleapis/google-oauth-java-client/releases)
- [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md)
- [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.31.0...v1.33.1)
---
updated-dependencies:
- dependency-name: com.google.oauth-client:google-oauth-client
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
* Updating SHAs
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
plugins/repository-gcs/build.gradle | 2 +-
.../repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1 | 1 -
.../repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 | 1 +
3 files changed, 2 insertions(+), 2 deletions(-)
delete mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1
create mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1
diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index e1ecf3c65a0f9..2cfbd76394bcb 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -69,7 +69,7 @@ dependencies {
api 'com.google.cloud:google-cloud-core-http:1.93.3'
api 'com.google.auth:google-auth-library-credentials:0.20.0'
api 'com.google.auth:google-auth-library-oauth2-http:0.20.0'
- api 'com.google.oauth-client:google-oauth-client:1.31.0'
+ api 'com.google.oauth-client:google-oauth-client:1.33.1'
api 'com.google.api-client:google-api-client:1.30.10'
api 'com.google.http-client:google-http-client-appengine:1.35.0'
api 'com.google.http-client:google-http-client-jackson2:1.35.0'
diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1
deleted file mode 100644
index 942dbb5d167a4..0000000000000
--- a/plugins/repository-gcs/licenses/google-oauth-client-1.31.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bf1cfbbaa2497d0a841ea0363df4a61170d5823b
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1
new file mode 100644
index 0000000000000..3897a85310ec6
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1
@@ -0,0 +1 @@
+0a431f1a677c5f89507591ab47a7ccdb0b18b6f7
\ No newline at end of file
From 2425f64baab25fe593937f4b78a2964c1797e90a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 28 Mar 2022 15:02:44 -0400
Subject: [PATCH 15/73] Bump htrace-core4 from 4.1.0-incubating to
4.2.0-incubating in /plugins/repository-hdfs (#2618)
* Bump htrace-core4 in /plugins/repository-hdfs
Bumps htrace-core4 from 4.1.0-incubating to 4.2.0-incubating.
---
updated-dependencies:
- dependency-name: org.apache.htrace:htrace-core4
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
* Updating SHAs
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
plugins/repository-hdfs/build.gradle | 2 +-
.../licenses/htrace-core4-4.1.0-incubating.jar.sha1 | 1 -
.../licenses/htrace-core4-4.2.0-incubating.jar.sha1 | 1 +
3 files changed, 2 insertions(+), 2 deletions(-)
delete mode 100644 plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1
create mode 100644 plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index dc1f55b686044..19f58bf48366d 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -61,7 +61,7 @@ dependencies {
api "org.apache.hadoop:hadoop-client-api:${versions.hadoop3}"
runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop3}"
api "org.apache.hadoop:hadoop-hdfs:${versions.hadoop3}"
- api 'org.apache.htrace:htrace-core4:4.1.0-incubating'
+ api 'org.apache.htrace:htrace-core4:4.2.0-incubating'
api "org.apache.logging.log4j:log4j-core:${versions.log4j}"
api 'org.apache.avro:avro:1.10.2'
api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
diff --git a/plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1 b/plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1
deleted file mode 100644
index 806c624c02cf0..0000000000000
--- a/plugins/repository-hdfs/licenses/htrace-core4-4.1.0-incubating.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-12b3e2adda95e8c41d9d45d33db075137871d2e2
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1 b/plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1
new file mode 100644
index 0000000000000..e2eafb09dba00
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/htrace-core4-4.2.0-incubating.jar.sha1
@@ -0,0 +1 @@
+94b3f1966922bc45d0f8a86a2aa867a4b0df288b
\ No newline at end of file
From 932cab67bda39b1b57a2ff72f191847914f6fe13 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 28 Mar 2022 15:03:19 -0400
Subject: [PATCH 16/73] Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless
(#2617)
* Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless
Bumps asm-tree from 7.2 to 9.2.
---
updated-dependencies:
- dependency-name: org.ow2.asm:asm-tree
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
* Updating SHAs
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
modules/lang-painless/build.gradle | 2 +-
modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 | 1 -
modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 | 1 +
3 files changed, 2 insertions(+), 2 deletions(-)
delete mode 100644 modules/lang-painless/licenses/asm-tree-7.2.jar.sha1
create mode 100644 modules/lang-painless/licenses/asm-tree-9.2.jar.sha1
diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle
index 7f37b5e76d904..c524f9a7e2f2c 100644
--- a/modules/lang-painless/build.gradle
+++ b/modules/lang-painless/build.gradle
@@ -47,7 +47,7 @@ testClusters.all {
dependencies {
api 'org.antlr:antlr4-runtime:4.9.3'
api 'org.ow2.asm:asm-util:9.2'
- api 'org.ow2.asm:asm-tree:7.2'
+ api 'org.ow2.asm:asm-tree:9.2'
api 'org.ow2.asm:asm-commons:9.2'
api 'org.ow2.asm:asm-analysis:7.2'
api 'org.ow2.asm:asm:9.2'
diff --git a/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1 b/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1
deleted file mode 100644
index 986a1c55f5e8f..0000000000000
--- a/modules/lang-painless/licenses/asm-tree-7.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3a23cc36edaf8fc5a89cb100182758ccb5991487
\ No newline at end of file
diff --git a/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1
new file mode 100644
index 0000000000000..7b486521ecef3
--- /dev/null
+++ b/modules/lang-painless/licenses/asm-tree-9.2.jar.sha1
@@ -0,0 +1 @@
+d96c99a30f5e1a19b0e609dbb19a44d8518ac01e
\ No newline at end of file
From 8f4aec109de82340290d35863217d0ddcd49f383 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 28 Mar 2022 13:13:49 -0700
Subject: [PATCH 17/73] Bump forbiddenapis in
/buildSrc/src/testKit/thirdPartyAudit (#2611)
Bumps [forbiddenapis](https://github.com/policeman-tools/forbidden-apis) from 3.2 to 3.3.
- [Release notes](https://github.com/policeman-tools/forbidden-apis/releases)
- [Commits](https://github.com/policeman-tools/forbidden-apis/compare/3.2...3.3)
---
updated-dependencies:
- dependency-name: de.thetaphi:forbiddenapis
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
buildSrc/src/testKit/thirdPartyAudit/build.gradle | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/buildSrc/src/testKit/thirdPartyAudit/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/build.gradle
index 41e699db94dcf..2c86d28cf0206 100644
--- a/buildSrc/src/testKit/thirdPartyAudit/build.gradle
+++ b/buildSrc/src/testKit/thirdPartyAudit/build.gradle
@@ -40,7 +40,7 @@ repositories {
}
dependencies {
- forbiddenApisCliJar 'de.thetaphi:forbiddenapis:3.2'
+ forbiddenApisCliJar 'de.thetaphi:forbiddenapis:3.3'
jdkJarHell 'org.opensearch:opensearch-core:current'
compileOnly "org.${project.properties.compileOnlyGroup}:${project.properties.compileOnlyVersion}"
implementation "org.${project.properties.compileGroup}:${project.properties.compileVersion}"
From 223efe68e61aa791b97273d717379d164bd2d00c Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Tue, 29 Mar 2022 12:23:08 -0400
Subject: [PATCH 18/73] Update Gradle to 7.4.1 (#2078)
* Update Gradle to 7.4.1
Signed-off-by: Andriy Redko
* Address code review comments, added @PathSensitive(PathSensitivity.RELATIVE) where applicable
Signed-off-by: Andriy Redko
---
buildSrc/build.gradle | 2 +-
.../precommit/LicenseHeadersTask.groovy | 5 +++++
.../gradle/precommit/FilePermissionsTask.java | 5 +++++
.../precommit/ForbiddenPatternsTask.java | 5 +++++
.../gradle/precommit/LoggerUsageTask.java | 2 ++
.../gradle/precommit/ThirdPartyAuditTask.java | 2 ++
.../gradle/test/rest/CopyRestApiTask.java | 5 +++++
.../gradle/test/rest/CopyRestTestsTask.java | 5 +++++
.../src/main/resources/minimumGradleVersion | 2 +-
gradle/missing-javadoc.gradle | 4 ++++
gradle/wrapper/gradle-wrapper.jar | Bin 59536 -> 59821 bytes
gradle/wrapper/gradle-wrapper.properties | 4 ++--
12 files changed, 37 insertions(+), 4 deletions(-)
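The recurring change in this commit pairs @SkipWhenEmpty inputs with @IgnoreEmptyDirectories and relative path sensitivity, which keeps the tasks cacheable across different checkout locations and stops empty directories from counting as meaningful inputs. A minimal standalone sketch of that pattern follows (the task class and property names are hypothetical, not taken from this build):

import org.gradle.api.DefaultTask;
import org.gradle.api.file.ConfigurableFileCollection;
import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.PathSensitive;
import org.gradle.api.tasks.PathSensitivity;
import org.gradle.api.tasks.SkipWhenEmpty;
import org.gradle.api.tasks.TaskAction;

public abstract class ExampleSourcesTask extends DefaultTask {

    // Same annotation set applied throughout this commit: skip when empty,
    // ignore empty directories, and use RELATIVE path sensitivity for caching.
    @InputFiles
    @SkipWhenEmpty
    @IgnoreEmptyDirectories
    @PathSensitive(PathSensitivity.RELATIVE)
    public abstract ConfigurableFileCollection getSources();

    @TaskAction
    public void run() {
        getSources().forEach(file -> getLogger().lifecycle("checking {}", file));
    }
}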
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 6134e7cd0250e..1ec66b582aed9 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -112,7 +112,7 @@ dependencies {
api 'commons-io:commons-io:2.7'
api "net.java.dev.jna:jna:5.10.0"
api 'gradle.plugin.com.github.johnrengelman:shadow:7.1.2'
- api 'de.thetaphi:forbiddenapis:3.2'
+ api 'de.thetaphi:forbiddenapis:3.3'
api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.12'
api 'org.apache.maven:maven-model:3.6.2'
api 'com.networknt:json-schema-validator:1.0.68'
diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
index b330934ed2d26..b8d0ed2b9c43c 100644
--- a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
+++ b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy
@@ -35,7 +35,10 @@ import org.opensearch.gradle.AntTask
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
+import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.OutputFile
+import org.gradle.api.tasks.PathSensitive
+import org.gradle.api.tasks.PathSensitivity
import org.gradle.api.tasks.SkipWhenEmpty
import java.nio.file.Files
@@ -78,6 +81,8 @@ class LicenseHeadersTask extends AntTask {
*/
@InputFiles
@SkipWhenEmpty
+ @IgnoreEmptyDirectories
+ @PathSensitive(PathSensitivity.RELATIVE)
List getJavaFiles() {
return project.sourceSets.collect({it.allJava})
}
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
index 9ffd472151b4b..d525a4a1e2c69 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java
@@ -46,8 +46,11 @@
import org.gradle.api.GradleException;
import org.gradle.api.file.FileCollection;
import org.gradle.api.file.FileTree;
+import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.OutputFile;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
import org.gradle.api.tasks.SkipWhenEmpty;
import org.gradle.api.tasks.StopExecutionException;
import org.gradle.api.tasks.TaskAction;
@@ -92,6 +95,8 @@ private static boolean isExecutableFile(File file) {
*/
@InputFiles
@SkipWhenEmpty
+ @IgnoreEmptyDirectories
+ @PathSensitive(PathSensitivity.RELATIVE)
public FileCollection getFiles() {
return GradleUtils.getJavaSourceSets(getProject())
.stream()
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
index f57c190496452..754743b9b784c 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java
@@ -37,9 +37,12 @@
import org.gradle.api.file.FileCollection;
import org.gradle.api.file.FileTree;
import org.gradle.api.plugins.JavaPluginConvention;
+import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.OutputFile;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
import org.gradle.api.tasks.SkipWhenEmpty;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.util.PatternFilterable;
@@ -100,6 +103,8 @@ public ForbiddenPatternsTask() {
@InputFiles
@SkipWhenEmpty
+ @IgnoreEmptyDirectories
+ @PathSensitive(PathSensitivity.RELATIVE)
public FileCollection getFiles() {
return getProject().getConvention()
.getPlugin(JavaPluginConvention.class)
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
index 1fd092b7f268f..ff9f6619d64e6 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java
@@ -37,6 +37,7 @@
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.tasks.CacheableTask;
import org.gradle.api.tasks.Classpath;
+import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.PathSensitive;
import org.gradle.api.tasks.PathSensitivity;
@@ -79,6 +80,7 @@ public void setClasspath(FileCollection classpath) {
@InputFiles
@PathSensitive(PathSensitivity.RELATIVE)
@SkipWhenEmpty
+ @IgnoreEmptyDirectories
public FileCollection getClassDirectories() {
return getProject().getConvention()
.getPlugin(JavaPluginConvention.class)
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
index ee68d2740e279..097710b3f1a6e 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -47,6 +47,7 @@
import org.gradle.api.tasks.CacheableTask;
import org.gradle.api.tasks.Classpath;
import org.gradle.api.tasks.CompileClasspath;
+import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFile;
import org.gradle.api.tasks.InputFiles;
@@ -195,6 +196,7 @@ public Set getMissingClassExcludes() {
@Classpath
@SkipWhenEmpty
+ @IgnoreEmptyDirectories
public Set<File> getJarsToScan() {
// These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files,
// or dependencies added as `files(...)`, we can't be sure if those are third party or not.
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java
index 399cd39d236d7..1468c4cb1b537 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java
@@ -43,9 +43,12 @@
import org.gradle.api.file.FileTree;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.provider.ListProperty;
+import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.OutputDirectory;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
import org.gradle.api.tasks.SkipWhenEmpty;
import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.TaskAction;
@@ -112,8 +115,10 @@ public boolean isSkipHasRestTestCheck() {
return skipHasRestTestCheck;
}
+ @IgnoreEmptyDirectories
@SkipWhenEmpty
@InputFiles
+ @PathSensitive(PathSensitivity.RELATIVE)
public FileTree getInputDir() {
FileTree coreFileTree = null;
boolean projectHasYamlRestTests = skipHasRestTestCheck || projectHasYamlRestTests();
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java
index 56ce449f4cf6f..dd94d040cb9d8 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java
@@ -43,9 +43,12 @@
import org.gradle.api.file.FileTree;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.provider.ListProperty;
+import org.gradle.api.tasks.IgnoreEmptyDirectories;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFiles;
import org.gradle.api.tasks.OutputDirectory;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
import org.gradle.api.tasks.SkipWhenEmpty;
import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.TaskAction;
@@ -104,8 +107,10 @@ String getSourceSetName() {
return sourceSetName;
}
+ @IgnoreEmptyDirectories
@SkipWhenEmpty
@InputFiles
+ @PathSensitive(PathSensitivity.RELATIVE)
public FileTree getInputDir() {
FileTree coreFileTree = null;
if (includeCore.get().isEmpty() == false) {
diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion
index ba92e72f5775b..6b0e58e78f5ee 100644
--- a/buildSrc/src/main/resources/minimumGradleVersion
+++ b/buildSrc/src/main/resources/minimumGradleVersion
@@ -1 +1 @@
-6.6.1
\ No newline at end of file
+7.4.1
\ No newline at end of file
diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle
index df47a3796c825..05531487f35f3 100644
--- a/gradle/missing-javadoc.gradle
+++ b/gradle/missing-javadoc.gradle
@@ -7,6 +7,8 @@
*/
import javax.annotation.Nullable
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
import org.gradle.internal.jvm.Jvm
/**
@@ -178,6 +180,8 @@ configure([
class MissingJavadocTask extends DefaultTask {
@InputFiles
@SkipWhenEmpty
+ @IgnoreEmptyDirectories
+ @PathSensitive(PathSensitivity.RELATIVE)
SourceDirectorySet srcDirSet;
@OutputDirectory
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 7454180f2ae8848c63b8b4dea2cb829da983f2fa..41d9927a4d4fb3f96a785543079b8df6723c946b 100644
GIT binary patch
delta 8958
[base85-encoded binary delta for gradle/wrapper/gradle-wrapper.jar omitted]
z{q@@l71UAZgj~*6edXb57fBUxvAS7s(RI=X868JM0+^DCn2yC>;v%S;qPOjB>YVsz(Zx9a>>BK&M
zIQK>7_n)4ud0X5YM}^i*keH{ehLsiy9@NvOpsFeQjdI6anLGvVbBw_*fU1TzdVS$i
z*4j7z!I5RF#rSz|8ibi$;qE{4`aqWYik7QB5U&F5C*;TO_x+gtzPGpzNt!7~nsBT7)Ckc(K~%uv&{{6A`mmBJVAk-{s~52Vu|HbCH7_W1~ZCX^RflOakGg=jo2Z
z<*s;5-J+2@^LRDZ-7EV&Pq+FTErw@pfFqvx^i%E7Fx#^n(E`m2(c>K-O5`M`Yek9el
zzTGs5qD6*G;y#~xu3>qWuO?-amKYtvRA}I9z#UspEeM;wOERYeot_n_EUMJf$4_u?E!6X~?q)tPoZb^_;8Y_Ox2h1m<+Le-fsRd|T8db<8#$bqez
zua^Z|>h%zdnuU^ww$#-dZ9NTM`FN+!IlLkz*FqWb!x^Z|C{KyGjZ+>G;;7Mb@LY|H
zc+Gp`L((Dw7pnDlHNm&;SfHedhx*kad$I^uGz{`0BYelq0yEUHpNKSkvj$|dpvY3{7*YGyhXA^LP0&wOw9oNoC=QoVx1<2Dne8qqZL
zm>nFh5DX(-RnQwvHCZQwn^#Z=E!SPVlaRJ78Bo@}!!9dRt^qZy?-*`Pt4WSmgucJv
zV1yFkcjlEM^uz-;b#Q7ZCP@Lk)m}uPX={R4B=56k7WNh11BN~0T*vr@!!ow^B0hOR
zQ)4)&(e%>bNNL%bm<&8H{*l_L7s0$2GUgX2Vd;=4d9Dm2v3TaL+;L>{K7h7
zV#k?xDPm(NDE31$
z<}|X)pEY6myjK+^gaIMk&Yj2~F0rSKemNqlsVm4c|N7mp_C*L01s;GNx#D-*&gk!qQr}^?_r@q!8fuXw!)fA7xkd}
zb>vHvdx~H$5qqAWrow7}+8zBM65-JOt5z
za=T6f7MK`XJuQog8kIEboPdhcaVJeHy)5z7EBLK5NRr()E|#K0L0N^JD@pUA^Czb`
zbUZ_558y+vqAGeyHCbrvOvLD67Ph}06959VzQ_|>RrXQAqE+AQ(-AaKdxoWaF8hdt
z{O3W@b^*o#-f1VuU>YMV03ELF7zkCN4Q&b#prz%3Nne0lSbRo@@
z^ihv%oIl~Qyl6Q;a#$*jOC%x0_;eis*)J7=f@Ct*)xF5
zo}u~@-I}2|$b%5L7>@+Z?4o+1r&v6ceIy+vroK&jCQ<4q&45HP2wCol4hVm3pZtjf
zHz1D7oyaSKJ~T{Gx}7ONLA)D5k(%%`WswrDyzX*rn}i}}TB4^y#@mAwPzoC)`?rYv
zHgx|trUN#mu*VzUV~8TnJM2Qh*ZM5B{x&y>5An`(M7=Z*Q>TdiH@j*2=moNuOtvpz
z+G`@~-`%~+AgPKgke@XiRPgndh@bp*-HRsh;HTtz@-y_uhb%7ylVOTqG0#u?Vn5c5
zEp*XRo|8hcgG^$#{$O9CJ&NE;TrfRpSnLmes&MO{m=N%zc`}gb!eQ7odl$oy1%PI}
z#AIxx%oRVy&{O~9xnK4$EY>(eQj}!HKIV$Fz*H=-=Kn)N0D6u`(;iO|VraI4fu_W`
z;b5{7;Lyx4za}DU#+U7}=H0dAS#YJJ&g2!P@Htu-AL&w=-)*%P9h2{wR|@?Ff9~)b
z^+e_3Hetq7W%ls{!?<6&Y$Z;NNB41pvrv)|MET6AZXFXJeFqbFW5@i5WGzl?bP+~?
z*&_puH;wKv2)9T_d+P`bLvJFqX#j&xa*-;0nGBbQf0DC>o~=J_Wmtf*2SZQr?{i~X
z9-IbRH8{iy?<0v9Ir1?$66+igy|yDQ5J~A9sFX@Pe<*kCY8+MwH?I
z`P}zfQ6l^AO8ehZ=l^ZR;R%uu4;BK*=?W9t|0{+-at(MQZ(CtG=EJFNaFMlKCMXu30(gJUqj5+
z`GM|!keqcj;FKTa_qq;{*dHRXAq157hlB@kL#8%yAm2AgfU|*rDKX@FLlp=HL8ddv
zAWLCHe@DcDeB2}fl7#=0+#<05c3=VqM*O3bkr@9X4nO|)q0hU;Gye{L8ZN*NH8Id@mP-u;Fmb8YuorjLrW&ndip8CN%_qp982r
w1WEnz9^$&s1hkp_3#lPJQ~!HI7WYYjA7>z!`?f%npAh2%rB@vD|Lau$2O)#1n*aa+
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index daf75f8e132cb..30b8947900f87 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -11,7 +11,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.1-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
-distributionSha256Sum=c9490e938b221daf0094982288e4038deed954a3f12fb54cbf270ddf4e37d879
+distributionSha256Sum=a9a7b7baba105f6557c9dcf9c3c6e8f7e57e6b49889c5f1d133f015d0727e4be
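The wrapper properties above pin both the distribution URL and its SHA-256 checksum; the wrapper verifies the downloaded zip against distributionSha256Sum before unpacking it. As a rough illustration of that check (not the wrapper's own code; the local zip path is a hypothetical download location), the comparison amounts to:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;
    import java.util.HexFormat;

    public class VerifyWrapperDistribution {
        public static void main(String[] args) throws Exception {
            // Hypothetical local copy of the zip named in distributionUrl above.
            Path zip = Path.of("gradle-7.4.1-all.zip");
            // Expected digest, copied from the distributionSha256Sum property above.
            String expected = "a9a7b7baba105f6557c9dcf9c3c6e8f7e57e6b49889c5f1d133f015d0727e4be";

            byte[] digest = MessageDigest.getInstance("SHA-256").digest(Files.readAllBytes(zip));
            String actual = HexFormat.of().formatHex(digest);

            System.out.println(actual.equals(expected) ? "checksum OK" : "checksum MISMATCH");
        }
    }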
From d8a1ba691204976e1b0e4ffc8e62b08a22e63692 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Tue, 29 Mar 2022 12:24:37 -0400
Subject: [PATCH 19/73] [CVE-2020-36518] Update jackson-databind to 2.13.2.2
(#2599)
Signed-off-by: Andriy Redko
---
buildSrc/build.gradle | 2 +-
buildSrc/version.properties | 1 +
distribution/tools/upgrade-cli/build.gradle | 2 +-
.../upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 +
.../tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1 | 1 -
libs/dissect/build.gradle | 2 +-
modules/ingest-geoip/build.gradle | 2 +-
.../ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 +
modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1 | 1 -
plugins/discovery-ec2/build.gradle | 2 +-
.../discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 +
plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 | 1 -
plugins/repository-azure/build.gradle | 2 +-
.../licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 +
.../repository-azure/licenses/jackson-databind-2.13.2.jar.sha1 | 1 -
plugins/repository-hdfs/build.gradle | 2 +-
.../repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 +
.../repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1 | 1 -
plugins/repository-s3/build.gradle | 2 +-
.../repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1 | 1 +
plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 | 1 -
qa/os/build.gradle | 2 +-
qa/wildfly/build.gradle | 2 +-
test/fixtures/hdfs-fixture/build.gradle | 2 +-
24 files changed, 18 insertions(+), 17 deletions(-)
create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1
delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1
create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1
delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1
create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1
delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1
create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1
delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1
create mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1
delete mode 100644 plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1
create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1
delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 1ec66b582aed9..cc7742a0d4390 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -116,7 +116,7 @@ dependencies {
api 'com.avast.gradle:gradle-docker-compose-plugin:0.14.12'
api 'org.apache.maven:maven-model:3.6.2'
api 'com.networknt:json-schema-validator:1.0.68'
- api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson')}"
+ api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}"
testFixturesApi "junit:junit:${props.getProperty('junit')}"
testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 34934d63a8975..41d8aa41ac631 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -10,6 +10,7 @@ bundled_jdk = 17.0.2+8
spatial4j = 0.7
jts = 1.15.0
jackson = 2.13.2
+jackson_databind = 2.13.2.2
snakeyaml = 1.26
icu4j = 70.1
supercsv = 2.4.0
diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle
index 0e1996f3d68fa..d29c808562168 100644
--- a/distribution/tools/upgrade-cli/build.gradle
+++ b/distribution/tools/upgrade-cli/build.gradle
@@ -15,7 +15,7 @@ dependencies {
compileOnly project(":server")
compileOnly project(":libs:opensearch-cli")
implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
- implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ implementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
implementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
testImplementation project(":test:framework")
testImplementation 'com.google.jimfs:jimfs:1.2'
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1
new file mode 100644
index 0000000000000..9d9266300feef
--- /dev/null
+++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.2.jar.sha1
@@ -0,0 +1 @@
+ffeb635597d093509f33e1e94274d14be610f933
\ No newline at end of file
diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1
deleted file mode 100644
index 5d356f3fd045f..0000000000000
--- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.13.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-926e48c451166a291f1ce6c6276d9abbefa7c00f
\ No newline at end of file
diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle
index 0f0b8407e7e6b..47f7970ea5ac0 100644
--- a/libs/dissect/build.gradle
+++ b/libs/dissect/build.gradle
@@ -34,7 +34,7 @@ dependencies {
}
testImplementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
- testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
}
tasks.named('forbiddenApisMain').configure {
diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle
index f78dc49e9fb8a..b1d5afbe68a17 100644
--- a/modules/ingest-geoip/build.gradle
+++ b/modules/ingest-geoip/build.gradle
@@ -42,7 +42,7 @@ dependencies {
api('com.maxmind.geoip2:geoip2:2.16.1')
// geoip2 dependencies:
api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}")
- api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}")
+ api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}")
api('com.maxmind.db:maxmind-db:2.0.0')
testImplementation 'org.elasticsearch:geolite2-databases:20191119'
diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1
new file mode 100644
index 0000000000000..9d9266300feef
--- /dev/null
+++ b/modules/ingest-geoip/licenses/jackson-databind-2.13.2.2.jar.sha1
@@ -0,0 +1 @@
+ffeb635597d093509f33e1e94274d14be610f933
\ No newline at end of file
diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1
deleted file mode 100644
index 5d356f3fd045f..0000000000000
--- a/modules/ingest-geoip/licenses/jackson-databind-2.13.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-926e48c451166a291f1ce6c6276d9abbefa7c00f
\ No newline at end of file
diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle
index 7998e0861c7b1..0e096958538a4 100644
--- a/plugins/discovery-ec2/build.gradle
+++ b/plugins/discovery-ec2/build.gradle
@@ -50,7 +50,7 @@ dependencies {
api "commons-logging:commons-logging:${versions.commonslogging}"
api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}"
api "commons-codec:commons-codec:${versions.commonscodec}"
- api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
}
diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1
new file mode 100644
index 0000000000000..9d9266300feef
--- /dev/null
+++ b/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.2.jar.sha1
@@ -0,0 +1 @@
+ffeb635597d093509f33e1e94274d14be610f933
\ No newline at end of file
diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1
deleted file mode 100644
index 5d356f3fd045f..0000000000000
--- a/plugins/discovery-ec2/licenses/jackson-databind-2.13.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-926e48c451166a291f1ce6c6276d9abbefa7c00f
\ No newline at end of file
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 60fb99f459454..a491e766eb7c7 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -62,7 +62,7 @@ dependencies {
api 'io.projectreactor.netty:reactor-netty-http:1.0.16'
api "org.slf4j:slf4j-api:${versions.slf4j}"
api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
- api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}"
api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}"
api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}"
diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1
new file mode 100644
index 0000000000000..9d9266300feef
--- /dev/null
+++ b/plugins/repository-azure/licenses/jackson-databind-2.13.2.2.jar.sha1
@@ -0,0 +1 @@
+ffeb635597d093509f33e1e94274d14be610f933
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1
deleted file mode 100644
index 5d356f3fd045f..0000000000000
--- a/plugins/repository-azure/licenses/jackson-databind-2.13.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-926e48c451166a291f1ce6c6276d9abbefa7c00f
\ No newline at end of file
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 19f58bf48366d..d17a4060b9ab6 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -64,7 +64,7 @@ dependencies {
api 'org.apache.htrace:htrace-core4:4.2.0-incubating'
api "org.apache.logging.log4j:log4j-core:${versions.log4j}"
api 'org.apache.avro:avro:1.10.2'
- api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
api 'com.google.code.gson:gson:2.9.0'
runtimeOnly 'com.google.guava:guava:30.1.1-jre'
api 'com.google.protobuf:protobuf-java:3.19.3'
diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1
new file mode 100644
index 0000000000000..9d9266300feef
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.2.jar.sha1
@@ -0,0 +1 @@
+ffeb635597d093509f33e1e94274d14be610f933
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1
deleted file mode 100644
index 5d356f3fd045f..0000000000000
--- a/plugins/repository-hdfs/licenses/jackson-databind-2.13.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-926e48c451166a291f1ce6c6276d9abbefa7c00f
\ No newline at end of file
diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index c5939958c816a..072683e3bd5e5 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -58,7 +58,7 @@ dependencies {
api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}"
api "commons-codec:commons-codec:${versions.commonscodec}"
api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
- api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
api "joda-time:joda-time:${versions.joda}"
diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1
new file mode 100644
index 0000000000000..9d9266300feef
--- /dev/null
+++ b/plugins/repository-s3/licenses/jackson-databind-2.13.2.2.jar.sha1
@@ -0,0 +1 @@
+ffeb635597d093509f33e1e94274d14be610f933
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1
deleted file mode 100644
index 5d356f3fd045f..0000000000000
--- a/plugins/repository-s3/licenses/jackson-databind-2.13.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-926e48c451166a291f1ce6c6276d9abbefa7c00f
\ No newline at end of file
diff --git a/qa/os/build.gradle b/qa/os/build.gradle
index 038e3d16745c3..92c5e4f154ad8 100644
--- a/qa/os/build.gradle
+++ b/qa/os/build.gradle
@@ -50,7 +50,7 @@ dependencies {
testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
testImplementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
- testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ testImplementation "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
}
tasks.named('forbiddenApisTest').configure {
diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle
index 7cb08a9de6f08..b7a5089451672 100644
--- a/qa/wildfly/build.gradle
+++ b/qa/wildfly/build.gradle
@@ -50,7 +50,7 @@ dependencies {
}
api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
- api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}"
api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:${versions.jackson}"
api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}"
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index d1040acd03aa7..c56cc6d196b63 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -41,6 +41,6 @@ dependencies {
api 'com.google.code.gson:gson:2.9.0'
api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}"
api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}"
- api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
+ api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}"
api 'net.minidev:json-smart:2.4.8'
}
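For context on the CVE addressed above: CVE-2020-36518 is a denial-of-service issue in which very deeply nested JSON exhausts the stack while jackson-databind binds it. A minimal probe of that pattern, not taken from this repository and with an arbitrarily chosen nesting depth, might look like the sketch below; affected releases are reported to fail the readValue call with a StackOverflowError, while the 2.13.2.2 line handles such input.

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class DeepNestingProbe {
        public static void main(String[] args) throws Exception {
            // Build {"a":{"a":{...}}} with an arbitrarily large nesting depth.
            int depth = 100_000;
            StringBuilder json = new StringBuilder();
            for (int i = 0; i < depth; i++) {
                json.append("{\"a\":");
            }
            json.append("0");
            for (int i = 0; i < depth; i++) {
                json.append('}');
            }

            // Binding such input to an untyped Object is the pattern the CVE describes.
            Object value = new ObjectMapper().readValue(json.toString(), Object.class);
            System.out.println("parsed without error: " + value.getClass());
        }
    }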
From bcaa06bc0ffe8d350e80b0da53af5bf2ba9f99d0 Mon Sep 17 00:00:00 2001
From: Suraj Singh <79435743+dreamer-89@users.noreply.github.com>
Date: Tue, 29 Mar 2022 09:27:28 -0700
Subject: [PATCH 20/73] Add back mapping method referenced in other repos
(#2636)
Signed-off-by: Suraj Singh
---
.../admin/indices/create/CreateIndexRequest.java | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
index 7f1f516d13a04..26ff4f1da3ba4 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
@@ -245,14 +245,22 @@ public CreateIndexRequest mapping(String mapping) {
return this;
}
+ /**
+ * Adds mapping that will be added when the index gets created.
+ *
+ * @param source The mapping source
+ * @param xContentType The content type of the source
+ */
+ public CreateIndexRequest mapping(String source, XContentType xContentType) {
+ return mapping(new BytesArray(source), xContentType);
+ }
+
/**
* Adds mapping that will be added when the index gets created.
*
* @param source The mapping source
* @param xContentType the content type of the mapping source
- * @deprecated types are being removed
*/
- @Deprecated
private CreateIndexRequest mapping(BytesReference source, XContentType xContentType) {
Objects.requireNonNull(xContentType);
Map<String, Object> mappingAsMap = XContentHelper.convertToMap(source, false, xContentType).v2();
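The restored overload keeps external callers source-compatible: it wraps the String in a BytesArray and delegates to the private BytesReference variant shown above. A hedged usage sketch follows; the index name and mapping body are illustrative, and the import paths assume the org.opensearch.common.xcontent package layout of this era of the codebase.

    import org.opensearch.action.admin.indices.create.CreateIndexRequest;
    import org.opensearch.common.xcontent.XContentType;

    public class CreateIndexExample {
        public static CreateIndexRequest buildRequest() {
            // Illustrative mapping body; the overload wraps the String in a BytesArray
            // and delegates to the BytesReference variant added back above.
            String mapping = "{\"properties\":{\"title\":{\"type\":\"text\"}}}";
            return new CreateIndexRequest("my-index").mapping(mapping, XContentType.JSON);
        }
    }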
From fb5cebbb9b47b35944432d5e15210f0327d32111 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 29 Mar 2022 10:46:13 -0700
Subject: [PATCH 21/73] Bump reactor-netty from 1.0.16 to 1.0.17 in
/plugins/repository-azure (#2613)
* Bump reactor-netty from 1.0.16 to 1.0.17 in /plugins/repository-azure
Bumps [reactor-netty](https://github.com/reactor/reactor-netty) from 1.0.16 to 1.0.17.
- [Release notes](https://github.com/reactor/reactor-netty/releases)
- [Commits](https://github.com/reactor/reactor-netty/compare/v1.0.16...v1.0.17)
---
updated-dependencies:
- dependency-name: io.projectreactor.netty:reactor-netty
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
* Updating SHAs
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
plugins/repository-azure/build.gradle | 2 +-
plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1 | 1 -
plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1 | 1 +
3 files changed, 2 insertions(+), 2 deletions(-)
delete mode 100644 plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1
create mode 100644 plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index a491e766eb7c7..628b5f7c58c04 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -57,7 +57,7 @@ dependencies {
api 'com.azure:azure-storage-blob:12.14.4'
api 'org.reactivestreams:reactive-streams:1.0.3'
api 'io.projectreactor:reactor-core:3.4.15'
- api 'io.projectreactor.netty:reactor-netty:1.0.16'
+ api 'io.projectreactor.netty:reactor-netty:1.0.17'
api 'io.projectreactor.netty:reactor-netty-core:1.0.16'
api 'io.projectreactor.netty:reactor-netty-http:1.0.16'
api "org.slf4j:slf4j-api:${versions.slf4j}"
diff --git a/plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1
deleted file mode 100644
index 582380e449a1d..0000000000000
--- a/plugins/repository-azure/licenses/reactor-netty-1.0.16.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d90829f6127966b0c35c4a3e8e23ca9ed29cd8a5
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1
new file mode 100644
index 0000000000000..a1f6aa3686692
--- /dev/null
+++ b/plugins/repository-azure/licenses/reactor-netty-1.0.17.jar.sha1
@@ -0,0 +1 @@
+7720beb4f58a4379e6294d62766d2e9e1bfaf646
\ No newline at end of file
From ec4fe7066b80b684c719630bf9609bb4bedd5a90 Mon Sep 17 00:00:00 2001
From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com>
Date: Tue, 29 Mar 2022 23:30:12 +0530
Subject: [PATCH 22/73] Make discovered_master field optional on the client to
support compatibility of the OpenSearch client with ODFE (#2641)
Signed-off-by: Mohit Godwani
---
.../cluster/health/ClusterHealthResponse.java | 4 ++--
.../health/ClusterHealthResponsesTests.java | 23 ++++++++++++++++++-
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java
index d9094e307fff1..841231c971eaa 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java
@@ -90,7 +90,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
// ClusterStateHealth fields
int numberOfNodes = (int) parsedObjects[i++];
int numberOfDataNodes = (int) parsedObjects[i++];
- boolean hasDiscoveredMaster = (boolean) parsedObjects[i++];
+ boolean hasDiscoveredMaster = Boolean.TRUE.equals(parsedObjects[i++]);
int activeShards = (int) parsedObjects[i++];
int relocatingShards = (int) parsedObjects[i++];
int activePrimaryShards = (int) parsedObjects[i++];
@@ -151,7 +151,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
// ClusterStateHealth fields
PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_NODES));
PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_DATA_NODES));
- PARSER.declareBoolean(constructorArg(), new ParseField(DISCOVERED_MASTER));
+ PARSER.declareBoolean(optionalConstructorArg(), new ParseField(DISCOVERED_MASTER));
PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS));
PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS));
PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS));
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
index decad9d6f840e..5af15396dbefa 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
@@ -228,7 +228,7 @@ public void testParseFromXContentWithDiscoveredMasterField() throws IOException
NamedXContentRegistry.EMPTY,
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
"{\"cluster_name\":\"535799904437:7-1-3-node\",\"status\":\"green\","
- + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":false,"
+ + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":true,"
+ "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0,"
+ "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0,"
+ "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0,"
@@ -236,6 +236,27 @@ public void testParseFromXContentWithDiscoveredMasterField() throws IOException
)
) {
+ ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser);
+ assertNotNull(clusterHealth);
+ assertThat(clusterHealth.getClusterName(), Matchers.equalTo("535799904437:7-1-3-node"));
+ assertThat(clusterHealth.getNumberOfNodes(), Matchers.equalTo(6));
+ assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true));
+ }
+ }
+
+ public void testParseFromXContentWithoutDiscoveredMasterField() throws IOException {
+ try (
+ XContentParser parser = JsonXContent.jsonXContent.createParser(
+ NamedXContentRegistry.EMPTY,
+ DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
+ "{\"cluster_name\":\"535799904437:7-1-3-node\",\"status\":\"green\","
+ + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,"
+ + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0,"
+ + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0,"
+ + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0,"
+ + "\"active_shards_percent_as_number\":100}"
+ )
+ ) {
ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser);
assertNotNull(clusterHealth);
assertThat(clusterHealth.getClusterName(), Matchers.equalTo("535799904437:7-1-3-node"));
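The parser change works because optionalConstructorArg() yields a null Boolean when discovered_master is absent from the response, and Boolean.TRUE.equals(...) is null-safe where the previous (boolean) cast would have thrown. A minimal sketch of that behaviour, with hypothetical variable names standing in for the parsed values:

    public class DiscoveredMasterCompat {
        public static void main(String[] args) {
            // With optionalConstructorArg(), a response that omits "discovered_master"
            // supplies a null Boolean instead of failing the whole parse.
            Boolean fromOdfeResponse = null;               // field absent
            Boolean fromOpenSearchResponse = Boolean.TRUE; // field present

            // Boolean.TRUE.equals(..) is null-safe, unlike unboxing via a (boolean) cast.
            System.out.println(Boolean.TRUE.equals(fromOdfeResponse));       // false
            System.out.println(Boolean.TRUE.equals(fromOpenSearchResponse)); // true
        }
    }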
From 8ea246e70ee53b0bbd668435e63b7c8061576bce Mon Sep 17 00:00:00 2001
From: Owais Kazi
Date: Tue, 29 Mar 2022 18:26:04 -0700
Subject: [PATCH 23/73] Changed JAVA_HOME to jdk-17 (#2656)
Signed-off-by: Owais Kazi
---
jenkins/jenkinsfile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/jenkins/jenkinsfile b/jenkins/jenkinsfile
index 113cb27c4a610..96973fceea765 100644
--- a/jenkins/jenkinsfile
+++ b/jenkins/jenkinsfile
@@ -3,7 +3,7 @@ pipeline {
docker {
label 'AL2-X64'
/* See
- https://github.com/opensearch-project/opensearch-build/blob/main/docker/ci/dockerfiles/build.ubuntu18.opensearch.x64.dockerfile
+ https://hub.docker.com/layers/ci-runner/opensearchstaging/ci-runner/ci-runner-ubuntu1804-build-v1/images/sha256-2c7bb2780bc08cd4e7e3c382ac53db414754dabd52f9b70e1c7e344dfb9a0e5e?context=explore
for docker image
*/
image 'opensearchstaging/ci-runner:ci-runner-ubuntu1804-build-v1'
@@ -16,7 +16,7 @@ pipeline {
JAVA14_HOME="/opt/java/openjdk-14"
JAVA17_HOME="/opt/java/openjdk-17"
JAVA8_HOME="/opt/java/openjdk-8"
- JAVA_HOME="/opt/java/openjdk-14"
+ JAVA_HOME="/opt/java/openjdk-17"
}
stages {
From 65cc56e754e4a854963663ead5d06ea6e975d1eb Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Tue, 29 Mar 2022 20:09:32 -0700
Subject: [PATCH 24/73] Gradle check retry (#2638)
* Add retry plugin support for Test implementations
Signed-off-by: Kunal Kotwani
* Update test retry parameters
Signed-off-by: Kunal Kotwani
* Remove CI environment check for test retries
Signed-off-by: Kunal Kotwani
* Update retry count for tests
Signed-off-by: Kunal Kotwani
---
build.gradle | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/build.gradle b/build.gradle
index be5766f327e0d..bfa435cb4812c 100644
--- a/build.gradle
+++ b/build.gradle
@@ -49,6 +49,7 @@ plugins {
id 'opensearch.docker-support'
id 'opensearch.global-build-info'
id "com.diffplug.spotless" version "6.3.0" apply false
+ id "org.gradle.test-retry" version "1.3.1" apply false
}
apply from: 'gradle/build-complete.gradle'
@@ -232,7 +233,7 @@ allprojects {
tasks.withType(JavaCompile).configureEach { JavaCompile compile ->
// See please https://bugs.openjdk.java.net/browse/JDK-8209058
if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_11) {
- compile.options.compilerArgs << '-Werror'
+ compile.options.compilerArgs << '-Werror'
}
compile.options.compilerArgs << '-Xlint:auxiliaryclass'
compile.options.compilerArgs << '-Xlint:cast'
@@ -386,6 +387,18 @@ gradle.projectsEvaluated {
}
}
+// test retry configuration
+subprojects {
+ apply plugin: "org.gradle.test-retry"
+ tasks.withType(Test).configureEach {
+ retry {
+ failOnPassedAfterRetry = false
+ maxRetries = 3
+ maxFailures = 10
+ }
+ }
+}
+
// eclipse configuration
allprojects {
apply plugin: 'eclipse'
@@ -445,9 +458,9 @@ allprojects {
tasks.named('eclipse') { dependsOn 'cleanEclipse', 'copyEclipseSettings' }
afterEvaluate {
- tasks.findByName("eclipseJdt")?.configure {
- dependsOn 'copyEclipseSettings'
- }
+ tasks.findByName("eclipseJdt")?.configure {
+ dependsOn 'copyEclipseSettings'
+ }
}
}
From b5b0cd1b3aaf2b20dc3f357cae10600c04f8d4bf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 30 Mar 2022 13:52:54 -0400
Subject: [PATCH 25/73] Bump jboss-annotations-api_1.2_spec in /qa/wildfly
(#2615)
Bumps [jboss-annotations-api_1.2_spec](https://github.com/jboss/jboss-annotations-api_spec) from 1.0.0.Final to 1.0.2.Final.
- [Release notes](https://github.com/jboss/jboss-annotations-api_spec/releases)
- [Commits](https://github.com/jboss/jboss-annotations-api_spec/compare/jboss-annotations-api_1.1_spec-1.0.0.Final...jboss-annotations-api_1.2_spec-1.0.2.Final)
---
updated-dependencies:
- dependency-name: org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
qa/wildfly/build.gradle | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle
index b7a5089451672..0e1c566bd2b52 100644
--- a/qa/wildfly/build.gradle
+++ b/qa/wildfly/build.gradle
@@ -40,7 +40,7 @@ testFixtures.useFixture()
dependencies {
providedCompile 'javax.enterprise:cdi-api:1.2'
- providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.0.Final'
+ providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.2.Final'
providedCompile 'org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.0.Final'
api('org.jboss.resteasy:resteasy-jackson2-provider:3.0.19.Final') {
exclude module: 'jackson-annotations'
From e051a426d2d0b7e986bd81feb82ec7cb1baf674f Mon Sep 17 00:00:00 2001
From: Suraj Singh <79435743+dreamer-89@users.noreply.github.com>
Date: Wed, 30 Mar 2022 21:33:17 -0700
Subject: [PATCH 26/73] Add missing 1.3.2 version to main to fix gradle check failures (#2679)
Signed-off-by: Suraj Singh
---
.ci/bwcVersions | 1 +
server/src/main/java/org/opensearch/Version.java | 1 +
2 files changed, 2 insertions(+)
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index ddc36af48d674..de840b910ada2 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -38,4 +38,5 @@ BWC_VERSION:
- "1.2.5"
- "1.3.0"
- "1.3.1"
+ - "1.3.2"
- "1.4.0"
diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java
index f74e529c442bb..eb6a80d37d83d 100644
--- a/server/src/main/java/org/opensearch/Version.java
+++ b/server/src/main/java/org/opensearch/Version.java
@@ -80,6 +80,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final Version V_1_2_5 = new Version(1020599, org.apache.lucene.util.Version.LUCENE_8_10_1);
public static final Version V_1_3_0 = new Version(1030099, org.apache.lucene.util.Version.LUCENE_8_10_1);
public static final Version V_1_3_1 = new Version(1030199, org.apache.lucene.util.Version.LUCENE_8_10_1);
+ public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1);
public static final Version V_1_4_0 = new Version(1040099, org.apache.lucene.util.Version.LUCENE_8_10_1);
public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0);
public static final Version CURRENT = V_2_0_0;
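For orientation, constants like V_1_3_2 are what wire- and index-compatibility checks key off. A small, illustrative sketch, assuming the usual onOrAfter/before/fromId helpers on this class; the variable name and the comparison are made up for the example:

    import org.opensearch.Version;

    public class BwcCheckExample {
        public static void main(String[] args) {
            // Typical compatibility check that a released version constant feeds into.
            Version nodeVersion = Version.V_1_3_2;
            if (nodeVersion.onOrAfter(Version.V_1_3_0) && nodeVersion.before(Version.V_2_0_0)) {
                System.out.println("node is on the 1.3 line: " + nodeVersion);
            }
            // The id 1030299 packs major 1, minor 3, revision 2, build 99.
            System.out.println(Version.fromId(1030299));
        }
    }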
From ff7805e6cadcd6879abb9f273763c9825aa058ac Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Thu, 31 Mar 2022 17:41:12 -0400
Subject: [PATCH 27/73] Update to Gradle 7.4.2 (#2688)
Signed-off-by: Andriy Redko
---
gradle/wrapper/gradle-wrapper.jar | Bin 59821 -> 59536 bytes
gradle/wrapper/gradle-wrapper.properties | 4 ++--
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 41d9927a4d4fb3f96a785543079b8df6723c946b..7454180f2ae8848c63b8b4dea2cb829da983f2fa 100644
GIT binary patch
delta 8722
[base85-encoded binary delta data omitted]
delta 8958
zcmY+KWl$VIlZIh&f(Hri?gR<$?iyT!TL`X;1^2~W7YVSq1qtqM!JWlDxLm%}UESUM
zndj}Uny%^UnjhVhFb!8V3s(a#fIy>`VW15{5nuy;_V&a5O#0S&!a4dSkUMz_VHu3S
zGA@p9Q$T|Sj}tYGWdjH;Mpp8m&yu&YURcrt{K;R|kM~(*{v%QwrBJIUF+K1kX5ZmF
zty3i{d`y0;DgE+de>vN@yYqFPe1Ud{!&G*Q?iUc^V=|H%4~2|N
zW+DM)W!`b&V2mQ0Y4u_)uB=P@-2`v|Wm{>CxER1P^
z>c}ZPZ)xxdOCDu59{X^~2id7+6l6x)U}C4Em?H~F`uOxS1?}xMxTV|5@}PlN%Cg$(
zwY6c}r60=z5ZA1L
zTMe;84rLtYvcm?M(H~ZqU;6F7Evo{P7!LGcdwO|qf1w+)MsnvK5^c@Uzj<{
zUoej1>95tuSvDJ|5K6k%&UF*uE6kBn47QJw^yEG;u^Z9oYWrK(+oL97hBsUMc_^
z;-lmxebwlB`Er_kXp2$`&o+rPJAN<`WX3ws2K{q@qUp}XTfV{t%KrsZ5vM!Q#4{V&
zq>iO$MCiLq#%wXj%`W$_%FRg_WR*quv65TdHhdpV&jlq<=K^K`&!Kl5mA6p4n~p3u
zWE{20^hYpn1M}}VmSHBXl1*-)2MP=0_k)EPr#>EoZukiXFDz?Di1I>2@Z^P$pvaF+
zN+qUy63jek2m59;YG)`r^F3-O)0RDIXPhf)XOOdkmu`3SMMSW(g+`Ajt{=h1dt~ks
ztrhhP|L4G%5x79N#kwAHh5N){@{fzE7n&%dnisCm65Za<8r_hKvfx4Bg*`%-*-Mvn
zFvn~)VP@}1sAyD+B{{8l{EjD10Av&Mz9^Xff*t`lU=q=S#(|>ls520;n3<}X#pyh&
z*{CJf7$*&~!9jMnw_D~ikUKJ2+UnXmN6qak{xx%W;BKuXt7@ky!LPI1qk?gDwG@@o
zkY+BkIie>{{q==5)kXw(*t#I?__Kwi>`=+s?Gq6X+vtSsaAO&Tf+Bl$vKnzc&%BHM
z=loWOQq~n}>l=EL(5&6((ESsQC3^@4jlO5Od{qN#sWV)vqXw}aA>*uvwZopNN(|-T
zRTF%5Y_k1R$;(d-)n;hWex{;7b6KgdAVE@&0pd(*qDzBO#YZV%kh%pYt1`hnQ(Fa&
zYiDrOTDqk5M7hzp9kI2h!PxNnuJ&xl*zF8sx6!67bA49R1bmUF5bpK&&{eI0U~cH}PM
z3aW1$lRb|ItkG5~_eBNu$|I|vYIdAA9a!pVq<+UTx*M}fG`23zxXp&E=FfnY-
zEzKj;Cu_s4v>leO7M2-mE(UzKHL4c$c`3dS*19OpLV^4NI*hWWnJQ9lvzP4c;c?do
zqrcsKT*i~eIHl0D3r4N{)+RsB6XhrC^;sp2cf_Eq#6*CV;t8v=V!ISe>>9kPgh}NI
z=1UZutslxcT$Ad;_P^;Oouoa(cs!Ctpvi>%aQ+Zp=1d|h{W9Wmf7JWxa(~<#