diff --git a/.gitignore b/.gitignore
index 7b482db063..ce84baa865 100644
--- a/.gitignore
+++ b/.gitignore
@@ -49,4 +49,13 @@ release/
 .tools/
 
 # NUnit test result file produced by nunit3-console.exe
-[Tt]est[Rr]esult.xml
\ No newline at end of file
+[Tt]est[Rr]esult.xml
+websites/**/_site/*
+websites/**/tools/*
+websites/**/_exported_templates/*
+websites/**/api/.manifest
+websites/**/docfx.log
+websites/**/lucenetemplate/plugins/*
+websites/apidocs/api/**/*.yml
+websites/apidocs/api/**/*.manifest
+!websites/apidocs/api/toc.yml
\ No newline at end of file
diff --git a/Lucene.Net.sln b/Lucene.Net.sln
index d80ff6124e..1991479c8a 100644
--- a/Lucene.Net.sln
+++ b/Lucene.Net.sln
@@ -112,6 +112,15 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Lucene.Net.Tests.Join", "sr
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Lucene.Net.Tests.Memory", "src\Lucene.Net.Tests.Memory\Lucene.Net.Tests.Memory.csproj", "{3BE7B6EA-8DBC-45E2-947C-1CA7E63B5603}"
 EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "apidocs", "apidocs", "{58FD6E39-F30F-4566-90E5-B7C9D6BC0660}"
+    ProjectSection(SolutionItems) = preProject
+        apidocs\docfx.filter.yml = apidocs\docfx.filter.yml
+        apidocs\docfx.json = apidocs\docfx.json
+        apidocs\docs.ps1 = apidocs\docs.ps1
+        apidocs\index.md = apidocs\index.md
+        apidocs\toc.yml = apidocs\toc.yml
+    EndProjectSection
+EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Lucene.Net.Tests.Misc", "src\Lucene.Net.Tests.Misc\Lucene.Net.Tests.Misc.csproj", "{F8DDC5B7-A621-4B67-AB4B-BBE083C05BB8}"
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Lucene.Net.Tests.Queries", "src\Lucene.Net.Tests.Queries\Lucene.Net.Tests.Queries.csproj", "{AC750DC0-05A3-4F96-8CC5-CFC8FD01D4CF}"
@@ -357,8 +366,8 @@ Global
     HideSolutionNode = FALSE
 EndGlobalSection
 GlobalSection(NestedProjects) = preSolution
-    {EFB2E31A-5917-49D5-A808-FE5061A550B4} = {8CA61D33-3590-4024-A304-7B1F75B50653}
     {4DF7EACE-2B25-43F6-B558-8520BF20BD76} = {8CA61D33-3590-4024-A304-7B1F75B50653}
+    {EFB2E31A-5917-49D5-A808-FE5061A550B4} = {8CA61D33-3590-4024-A304-7B1F75B50653}
     {119BBACD-D4DB-4E3B-922F-3DA83E0B29E2} = {4DF7EACE-2B25-43F6-B558-8520BF20BD76}
     {CF3A74CA-FEFD-4F41-961B-CC8CF8D96286} = {8CA61D33-3590-4024-A304-7B1F75B50653}
     {4B054831-5275-44E2-A4D4-CA0B19BEE19A} = {8CA61D33-3590-4024-A304-7B1F75B50653}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/package.md b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/package.md
index b4b5e73271..c5bb917787 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/package.md
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/package.md
@@ -16,7 +16,7 @@
 limitations under the License.
 -->
 
-
+
 
 Analyzer for Chinese, Japanese, and Korean, which indexes bigrams.
 This analyzer generates bigram terms, which are overlapping groups of two adjacent Han, Hiragana, Katakana, or Hangul characters.
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cn/package.md b/src/Lucene.Net.Analysis.Common/Analysis/Cn/package.md
index 50a3555371..51fbfdc159 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Cn/package.md
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Cn/package.md
@@ -16,7 +16,7 @@
 limitations under the License.
 -->
 
-
+
 
 Analyzer for Chinese, which indexes unigrams (individual Chinese characters).
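The Cjk and Cn package docs touched above describe analyzers that index overlapping two-character bigrams and single-character unigrams, respectively. As a rough sketch of what that tokenization looks like through the Lucene.NET 4.8 API (not part of this patch; the field name and sample string are arbitrary):

```csharp
// Sketch only: prints the bigram tokens CJKAnalyzer emits for a CJK string.
using System;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Cjk;
using Lucene.Net.Analysis.TokenAttributes;
using Lucene.Net.Util;

public static class CjkBigramDemo
{
    public static void Main()
    {
        Analyzer analyzer = new CJKAnalyzer(LuceneVersion.LUCENE_48);
        using (TokenStream ts = analyzer.GetTokenStream("f", new StringReader("日本語")))
        {
            ICharTermAttribute term = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            while (ts.IncrementToken())
            {
                Console.WriteLine(term.ToString()); // overlapping two-character bigrams
            }
            ts.End();
        }
    }
}
```

For input such as 日本語 this emits the overlapping pair 日本 and 本語 rather than one token per character, which is the bigram behavior the Cjk docs describe.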
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Compound/package.md b/src/Lucene.Net.Analysis.Common/Analysis/Compound/package.md
index 77585b4e9e..c807b87f31 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Compound/package.md
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Compound/package.md
@@ -74,8 +74,8 @@ filter available:
 
 #### HyphenationCompoundWordTokenFilter
 
-The [](xref:Lucene.Net.Analysis.Compound.HyphenationCompoundWordTokenFilter
-HyphenationCompoundWordTokenFilter) uses hyphenation grammars to find
+The [
+HyphenationCompoundWordTokenFilter](xref:Lucene.Net.Analysis.Compound.HyphenationCompoundWordTokenFilter) uses hyphenation grammars to find
 potential subwords that are worth checking against the dictionary. It can be used
 without a dictionary as well, but then it produces a lot of "nonword" tokens.
 The quality of the output tokens is directly connected to the quality of the
@@ -101,8 +101,8 @@ Credits for the hyphenation code go to the
 
 #### DictionaryCompoundWordTokenFilter
 
-The [](xref:Lucene.Net.Analysis.Compound.DictionaryCompoundWordTokenFilter
-DictionaryCompoundWordTokenFilter) uses a dictionary-only approach to
+The [
+DictionaryCompoundWordTokenFilter](xref:Lucene.Net.Analysis.Compound.DictionaryCompoundWordTokenFilter) uses a dictionary-only approach to
 find subwords in a compound word. It is much slower than the one that
 uses the hyphenation grammars. You can use it as a starting point to
 see whether your dictionary is good or not, because it is much simpler in design.
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/package.md b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/package.md
index bf1ec16dda..dc5c944cb8 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Payloads/package.md
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Payloads/package.md
@@ -15,11 +15,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-
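The Compound hunks above contrast a hyphenation-grammar approach with a dictionary-only approach. A minimal sketch of the dictionary-only filter against the Lucene.NET 4.8 API (not from this patch; the three-entry German dictionary and the sample input are illustrative only):

```csharp
// Sketch only: decomposes a German compound word using a tiny dictionary.
using System;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Compound;
using Lucene.Net.Analysis.Core;
using Lucene.Net.Analysis.TokenAttributes;
using Lucene.Net.Analysis.Util;
using Lucene.Net.Util;

public static class CompoundDemo
{
    public static void Main()
    {
        const LuceneVersion version = LuceneVersion.LUCENE_48;
        // true = ignore case when matching dictionary entries
        var dictionary = new CharArraySet(version, new[] { "dampf", "schiff", "fahrt" }, true);

        TokenStream ts = new WhitespaceTokenizer(version, new StringReader("dampfschifffahrt"));
        ts = new DictionaryCompoundWordTokenFilter(version, ts, dictionary);

        ICharTermAttribute term = ts.AddAttribute<ICharTermAttribute>();
        ts.Reset();
        while (ts.IncrementToken())
        {
            Console.WriteLine(term.ToString()); // original token plus dictionary subwords
        }
        ts.End();
        ts.Dispose();
    }
}
```

The filter keeps the original token and injects each dictionary entry it finds as a subword at the same position, so the output contains both "dampfschifffahrt" and its parts.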
-[](xref:Lucene.Net.Analysis.TokenAttributes.CharTermAttribute) |
+[CharTermAttribute](xref:Lucene.Net.Analysis.TokenAttributes.CharTermAttribute) |
 The term text of a token. Implements {@link java.lang.CharSequence}
 (providing methods length() and charAt(), and allowing e.g. for direct
@@ -299,31 +304,31 @@ and proximity searches (though sentence identification is not provided by Lucene
 | |
-[](xref:Lucene.Net.Analysis.TokenAttributes.OffsetAttribute) |
+[OffsetAttribute](xref:Lucene.Net.Analysis.TokenAttributes.OffsetAttribute) |
 The start and end offset of a token in characters. | |
-[](xref:Lucene.Net.Analysis.TokenAttributes.PositionIncrementAttribute) |
+[PositionIncrementAttribute](xref:Lucene.Net.Analysis.TokenAttributes.PositionIncrementAttribute) |
 See above for detailed information about position increment. | |
-[](xref:Lucene.Net.Analysis.TokenAttributes.PositionLengthAttribute) |
+[PositionLengthAttribute](xref:Lucene.Net.Analysis.TokenAttributes.PositionLengthAttribute) |
 The number of positions occupied by a token. | |
-[](xref:Lucene.Net.Analysis.TokenAttributes.PayloadAttribute) |
+[PayloadAttribute](xref:Lucene.Net.Analysis.TokenAttributes.PayloadAttribute) |
 The payload that a Token can optionally have. | |
-[](xref:Lucene.Net.Analysis.TokenAttributes.TypeAttribute) |
+[TypeAttribute](xref:Lucene.Net.Analysis.TokenAttributes.TypeAttribute) |
 The type of the token. Default is 'word'. | |
-[](xref:Lucene.Net.Analysis.TokenAttributes.FlagsAttribute) |
+[FlagsAttribute](xref:Lucene.Net.Analysis.TokenAttributes.FlagsAttribute) |
 Optional flags a token can have. | |
-[](xref:Lucene.Net.Analysis.TokenAttributes.KeywordAttribute) |
+[KeywordAttribute](xref:Lucene.Net.Analysis.TokenAttributes.KeywordAttribute) |
 Keyword-aware TokenStreams/-Filters skip modification of tokens that
 return true from this attribute's isKeyword() method. | |
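Consuming the attributes in the table above follows the usual add-attribute-then-iterate pattern. A minimal sketch against the Lucene.NET 4.8 API (the analyzer choice, field name, and sample text are placeholders, not part of this patch):

```csharp
// Sketch only: reads term text, offsets, position increment, and type
// for each token produced by an analyzer.
using System;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Analysis.TokenAttributes;
using Lucene.Net.Util;

public static class AttributeDemo
{
    public static void Main()
    {
        Analyzer analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);
        using (TokenStream ts = analyzer.GetTokenStream("body", new StringReader("The quick brown fox")))
        {
            ICharTermAttribute term = ts.AddAttribute<ICharTermAttribute>();
            IOffsetAttribute offset = ts.AddAttribute<IOffsetAttribute>();
            IPositionIncrementAttribute posIncr = ts.AddAttribute<IPositionIncrementAttribute>();
            ITypeAttribute type = ts.AddAttribute<ITypeAttribute>();

            ts.Reset();
            while (ts.IncrementToken())
            {
                Console.WriteLine($"{term} [{offset.StartOffset}-{offset.EndOffset}] " +
                                  $"+{posIncr.PositionIncrement} type={type.Type}");
            }
            ts.End();
        }
    }
}
```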
@@ -343,48 +348,48 @@ The code fragment of the [analysis workflow
 protocol](#analysis-workflow) above shows a token stream being obtained, used, and then
 left for garbage. However, that does not mean that the components of
 that token stream will, in fact, be discarded. The default is just the
-opposite. [](xref:Lucene.Net.Analysis.Analyzer) applies a reuse
+opposite. [Analyzer](xref:Lucene.Net.Analysis.Analyzer) applies a reuse
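A small sketch of the reuse pattern this hunk documents, assuming the Lucene.NET 4.8 API: a single Analyzer instance is held for the life of the application, and each GetTokenStream call hands back reused token stream components rather than newly constructed ones (the field name and helper are hypothetical):

```csharp
// Sketch only: one shared Analyzer reused across many token streams.
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Util;

public static class ReuseDemo
{
    // Created once; Analyzer applies its reuse strategy internally.
    private static readonly Analyzer SharedAnalyzer = new StandardAnalyzer(LuceneVersion.LUCENE_48);

    public static int CountTokens(string text)
    {
        // GetTokenStream returns reused components for this thread/field
        // instead of rebuilding the tokenizer chain per document.
        using (TokenStream ts = SharedAnalyzer.GetTokenStream("content", new StringReader(text)))
        {
            int count = 0;
            ts.Reset();
            while (ts.IncrementToken()) count++;
            ts.End();
            return count;
        }
    }
}
```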