diff --git a/docs/reference/analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc
index 1495e8a91b2a7..bcff83c5e9950 100644
--- a/docs/reference/analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-flatten-graph-tokenfilter]]
-=== Flatten Graph Token Filter
+=== Flatten graph token filter
+++++
+Flatten graph
+++++
experimental[This functionality is marked as experimental in Lucene]
diff --git a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
index 2f258b00ee96b..39f584bffe694 100644
--- a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-hunspell-tokenfilter]]
-=== Hunspell Token Filter
+=== Hunspell token filter
+++++
+Hunspell
+++++
Basic support for Hunspell stemming. Hunspell dictionaries will be
picked up from a dedicated hunspell directory on the filesystem
diff --git a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
index ea9dcad8a6ca9..6ee9f41f777d1 100644
--- a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-keyword-marker-tokenfilter]]
-=== Keyword Marker Token Filter
+=== Keyword marker token filter
+++++
+Keyword marker
+++++
Protects words from being modified by stemmers. Must be placed before
any stemming filters.
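
For illustration, a sketch of how this might look via the `_analyze` API (the keyword list here is a made-up placeholder): marking `running` as a keyword shields it from the stemmer that follows, while `jumping` is still stemmed.

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": [
    {
      "type": "keyword_marker",
      "keywords": [ "running" ]
    },
    "porter_stem"
  ],
  "text": "running jumping"
}
----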
diff --git a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
index ca15b2da5a8f5..58d86596bbf04 100644
--- a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-keyword-repeat-tokenfilter]]
-=== Keyword Repeat Token Filter
+=== Keyword repeat token filter
+++++
+Keyword repeat
+++++
The `keyword_repeat` token filter emits each incoming token twice, once
as a keyword and once as a non-keyword, to allow an unstemmed version of a
diff --git a/docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc
index ff0695e64964f..3b7796bc596fa 100644
--- a/docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/kstem-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-kstem-tokenfilter]]
-=== KStem Token Filter
+=== KStem token filter
+++++
+KStem
+++++
The `kstem` token filter is a high-performance filter for English. All
terms must already be lowercased (use the `lowercase` filter) for this
diff --git a/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
index 86e14c09d51cd..c6b134f8735be 100644
--- a/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-minhash-tokenfilter]]
-=== MinHash Token Filter
+=== MinHash token filter
+++++
+MinHash
+++++
The `min_hash` token filter hashes each token of the token stream and divides
the resulting hashes into buckets, keeping the lowest-valued hashes per
diff --git a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc
index c943c95defe2d..e12fa99324228 100644
--- a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-multiplexer-tokenfilter]]
-=== Multiplexer Token Filter
+=== Multiplexer token filter
+++++
+Multiplexer
+++++
A token filter of type `multiplexer` will emit multiple tokens at the same position,
each version of the token having been run through a different filter. Identical
diff --git a/docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc
index 2ff8ab134972a..85f33d3f38490 100644
--- a/docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/normalization-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-normalization-tokenfilter]]
-=== Normalization Token Filter
+=== Normalization token filters
+++++
+Normalization
+++++
There are several token filters available which try to normalize special
characters of a certain language.
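
For example, one of these filters can be tried directly with `_analyze` (a sketch; the sample text is arbitrary):

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": [ "german_normalization" ],
  "text": "Weißkopfseeadler"
}
----

The `german_normalization` filter should fold characters such as `ß` into `ss`, emitting `Weisskopfseeadler`.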
diff --git a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
index 0b5aa62029fea..7b9a3b3199040 100644
--- a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-pattern-capture-tokenfilter]]
-=== Pattern Capture Token Filter
+=== Pattern capture token filter
+++++
+Pattern capture
+++++
The `pattern_capture` token filter, unlike the `pattern` tokenizer,
emits a token for every capture group in the regular expression.
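
A minimal sketch of how such a filter might be configured (the index, analyzer, and filter names are placeholders), splitting email addresses into reusable parts while keeping the original token:

[source,console]
----
PUT /my-index
{
  "settings": {
    "analysis": {
      "filter": {
        "email_parts": {
          "type": "pattern_capture",
          "preserve_original": true,
          "patterns": [ "([^@]+)", "(\\p{L}+)" ]
        }
      },
      "analyzer": {
        "email_analyzer": {
          "tokenizer": "uax_url_email",
          "filter": [ "email_parts", "lowercase" ]
        }
      }
    }
  }
}
----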
diff --git a/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
index bc8cdc385bf56..85ddd236556f0 100644
--- a/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-pattern_replace-tokenfilter]]
-=== Pattern Replace Token Filter
+=== Pattern replace token filter
+++++
+Pattern replace
+++++
The `pattern_replace` token filter makes it easy to handle string
replacements based on a regular expression. The regular expression is
diff --git a/docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc
index 4a7324acc39a7..cceac39e691ca 100644
--- a/docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/phonetic-tokenfilter.asciidoc
@@ -1,4 +1,7 @@
[[analysis-phonetic-tokenfilter]]
-=== Phonetic Token Filter
+=== Phonetic token filter
+++++
+Phonetic
+++++
The `phonetic` token filter is provided as the {plugins}/analysis-phonetic.html[`analysis-phonetic`] plugin.
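
Assuming the plugin is installed, a configuration might look like the following sketch (the index, analyzer, and filter names are placeholders; `metaphone` is one of several supported encoders):

[source,console]
----
PUT /phonetic-index
{
  "settings": {
    "analysis": {
      "filter": {
        "my_metaphone": {
          "type": "phonetic",
          "encoder": "metaphone",
          "replace": false
        }
      },
      "analyzer": {
        "phonetic_analyzer": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "my_metaphone" ]
        }
      }
    }
  }
}
----

With `replace` set to `false`, the phonetic token is emitted alongside the original rather than replacing it.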
diff --git a/docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc
index fc2edf526c372..519618c2b2108 100644
--- a/docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/porterstem-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-porterstem-tokenfilter]]
-=== Porter Stem Token Filter
+=== Porter stem token filter
+++++
+Porter stem
+++++
A token filter of type `porter_stem` that transforms the token stream as
per the Porter stemming algorithm.
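
Because the Porter algorithm expects lowercased input, it is typically chained after `lowercase`; a quick sketch with `_analyze` (the sample text is arbitrary):

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": [ "lowercase", "porter_stem" ],
  "text": "the foxes jumping quickly"
}
----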
diff --git a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc
index e21e4e5690f60..2360b386aae55 100644
--- a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-predicatefilter-tokenfilter]]
-=== Predicate Token Filter Script
+=== Predicate script token filter
+++++
+Predicate script
+++++
The `predicate_token_filter` token filter takes a predicate script and removes
tokens that do not match the predicate.
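
A sketch of one possible configuration (the names are placeholders; the script assumes the Painless analysis-predicate context, where the current token is exposed as `token`), keeping only tokens longer than five characters:

[source,console]
----
PUT /predicate-index
{
  "settings": {
    "analysis": {
      "filter": {
        "long_tokens_only": {
          "type": "predicate_token_filter",
          "script": {
            "source": "token.getTerm().length() > 5"
          }
        }
      },
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "filter": [ "long_tokens_only" ]
        }
      }
    }
  }
}
----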
diff --git a/docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc
index 594e18eaf7f7e..e9dbf1ed15303 100644
--- a/docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-remove-duplicates-tokenfilter]]
-=== Remove Duplicates Token Filter
+=== Remove duplicates token filter
+++++
+Remove duplicates
+++++
A token filter of type `remove_duplicates` that drops identical tokens at the
same position.
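
This is typically useful after `keyword_repeat` plus a stemmer, where words that stem to themselves would otherwise be indexed twice; a sketch with `_analyze` (the sample text is arbitrary):

[source,console]
----
GET /_analyze
{
  "tokenizer": "whitespace",
  "filter": [ "keyword_repeat", "porter_stem", "remove_duplicates" ],
  "text": "jumping dog"
}
----

Here `dog` stems to itself, so `keyword_repeat` would produce two identical tokens at the same position and `remove_duplicates` drops one of them.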
diff --git a/docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc
index b00499815553b..08eaa796951c8 100644
--- a/docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/reverse-tokenfilter.asciidoc
@@ -1,4 +1,7 @@
[[analysis-reverse-tokenfilter]]
-=== Reverse Token Filter
+=== Reverse token filter
+++++
+Reverse
+++++
A token filter of type `reverse` that simply reverses each token.
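
A quick sketch with `_analyze` (the sample text is arbitrary):

[source,console]
----
GET /_analyze
{
  "tokenizer": "whitespace",
  "filter": [ "reverse" ],
  "text": "quick fox"
}
----

This should emit `kciuq` and `xof`.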
diff --git a/docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc
index a6d544fc7b39d..5f6bec96dbae7 100644
--- a/docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/shingle-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-shingle-tokenfilter]]
-=== Shingle Token Filter
+=== Shingle token filter
+++++
+Shingle
+++++
NOTE: Shingles are generally used to help speed up phrase queries. Rather
than building filter chains by hand, you may find it easier to use the
diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
index bafb4fb7f7734..df1df3a43cb31 100644
--- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-snowball-tokenfilter]]
-=== Snowball Token Filter
+=== Snowball token filter
+++++
+Snowball
+++++
A filter that stems words using a Snowball-generated stemmer. The
`language` parameter controls the stemmer with the following available
diff --git a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
index d2fbba841808e..94d64bb82ea9b 100644
--- a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-stemmer-override-tokenfilter]]
-=== Stemmer Override Token Filter
+=== Stemmer override token filter
+++++
+Stemmer override
+++++
Overrides stemming algorithms by applying a custom mapping, then
protecting these terms from being modified by stemmers. Must be placed
diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
index 29ae8e96606c7..4e98e24d08ef0 100644
--- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-stemmer-tokenfilter]]
-=== Stemmer Token Filter
+=== Stemmer token filter
+++++
+Stemmer
+++++
// Adds attribute for the 'minimal_portuguese' stemmer values link.
// This link contains ~, which is converted to subscript.
diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
index f4019fa1800e9..3263d89e2b3ff 100644
--- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-stop-tokenfilter]]
-=== Stop Token Filter
+=== Stop token filter
+++++
+Stop
+++++
A token filter of type `stop` that removes stop words from token
streams.
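
A minimal sketch (the index, analyzer, and filter names are placeholders) using the predefined `_english_` stop word list:

[source,console]
----
PUT /stop-index
{
  "settings": {
    "analysis": {
      "filter": {
        "english_stop": {
          "type": "stop",
          "stopwords": "_english_"
        }
      },
      "analyzer": {
        "no_stopwords": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "english_stop" ]
        }
      }
    }
  }
}
----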
diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
index 63e037de486f8..13e7609ac7ee2 100644
--- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-synonym-graph-tokenfilter]]
-=== Synonym Graph Token Filter
+=== Synonym graph token filter
+++++
+Synonym graph
+++++
The `synonym_graph` token filter makes it easy to handle synonyms,
including multi-word synonyms, correctly during the analysis process.
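
A sketch of a search-time configuration (the names and the synonym rule are placeholders); because `internet of things` is multi-word, the graph-aware filter keeps token positions consistent for phrase queries:

[source,console]
----
PUT /graph-synonyms
{
  "settings": {
    "analysis": {
      "filter": {
        "my_graph_synonyms": {
          "type": "synonym_graph",
          "synonyms": [ "internet of things, iot" ]
        }
      },
      "analyzer": {
        "search_synonyms": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "my_graph_synonyms" ]
        }
      }
    }
  }
}
----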
diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
index 3c0e967afc4bd..58caf8bb28281 100644
--- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-synonym-tokenfilter]]
-=== Synonym Token Filter
+=== Synonym token filter
+++++
+Synonym
+++++
The `synonym` token filter makes it easy to handle synonyms during the
analysis process. Synonyms are configured using a configuration file.
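
A minimal sketch (the index, analyzer, and filter names are placeholders, and `analysis/synonym.txt` stands in for whatever file is used, relative to the config directory):

[source,console]
----
PUT /synonyms-index
{
  "settings": {
    "analysis": {
      "filter": {
        "my_synonyms": {
          "type": "synonym",
          "synonyms_path": "analysis/synonym.txt"
        }
      },
      "analyzer": {
        "with_synonyms": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "my_synonyms" ]
        }
      }
    }
  }
}
----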
diff --git a/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc
index 34a0e93a3af22..1373811b0cb82 100644
--- a/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc
@@ -1,4 +1,7 @@
[[analysis-trim-tokenfilter]]
-=== Trim Token Filter
+=== Trim token filter
+++++
+Trim
+++++
The `trim` token filter trims the whitespace surrounding a token.
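
A quick sketch with `_analyze`; the `keyword` tokenizer preserves the padding in the sample text so that `trim` has something to remove:

[source,console]
----
GET /_analyze
{
  "tokenizer": "keyword",
  "filter": [ "trim" ],
  "text": " fox "
}
----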
diff --git a/docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc
index 4c28ddba38146..c1d171dbbbbc0 100644
--- a/docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/truncate-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-truncate-tokenfilter]]
-=== Truncate Token Filter
+=== Truncate token filter
+++++
+Truncate
+++++
The `truncate` token filter can be used to truncate tokens to a
specific length.
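
A sketch with `_analyze` (the `length` value here is arbitrary; it defaults to 10):

[source,console]
----
GET /_analyze
{
  "tokenizer": "whitespace",
  "filter": [
    {
      "type": "truncate",
      "length": 5
    }
  ],
  "text": "elasticsearch"
}
----

This should emit `elast`.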
diff --git a/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc
index 8b42f6b73b934..6ce084c183ff4 100644
--- a/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/unique-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-unique-tokenfilter]]
-=== Unique Token Filter
+=== Unique token filter
+++++
+Unique
+++++
The `unique` token filter can be used to index only unique tokens during
analysis. By default, it is applied to the entire token stream. If
diff --git a/docs/reference/analysis/tokenfilters/uppercase-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/uppercase-tokenfilter.asciidoc
index 639d1e9106849..c745f247ec3d9 100644
--- a/docs/reference/analysis/tokenfilters/uppercase-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/uppercase-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-uppercase-tokenfilter]]
-=== Uppercase Token Filter
+=== Uppercase token filter
+++++
+Uppercase
+++++
A token filter of type `uppercase` that normalizes token text to upper
case.
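
A quick sketch with `_analyze` (the sample text is arbitrary):

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": [ "uppercase" ],
  "text": "the Quick FoX"
}
----

This should emit `THE`, `QUICK`, and `FOX`.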
diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc
index 4acd0163109a4..66e7b18c74426 100644
--- a/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-word-delimiter-graph-tokenfilter]]
-=== Word Delimiter Graph Token Filter
+=== Word delimiter graph token filter
+++++
+Word delimiter graph
+++++
experimental[This functionality is marked as experimental in Lucene]
diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc
index 1c07176430eed..d0cea87176d41 100644
--- a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc
@@ -1,5 +1,8 @@
[[analysis-word-delimiter-tokenfilter]]
-=== Word Delimiter Token Filter
+=== Word delimiter token filter
+++++
+Word delimiter
+++++
Named `word_delimiter`, it splits words into subwords and performs
optional transformations on subword groups. Words are split into