@azure/search-documents 12.0.0-beta.4 → 12.0.0
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in those registries.
- package/dist/index.js +579 -2392
- package/dist/index.js.map +1 -1
- package/dist-esm/src/constants.js +1 -1
- package/dist-esm/src/constants.js.map +1 -1
- package/dist-esm/src/generated/data/models/index.js +26 -226
- package/dist-esm/src/generated/data/models/index.js.map +1 -1
- package/dist-esm/src/generated/data/models/mappers.js +49 -222
- package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/data/models/parameters.js +38 -89
- package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
- package/dist-esm/src/generated/data/operations/documents.js +5 -10
- package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
- package/dist-esm/src/generated/data/searchClient.js +28 -2
- package/dist-esm/src/generated/data/searchClient.js.map +1 -1
- package/dist-esm/src/generated/service/models/index.js +158 -181
- package/dist-esm/src/generated/service/models/index.js.map +1 -1
- package/dist-esm/src/generated/service/models/mappers.js +25 -800
- package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/service/models/parameters.js +1 -51
- package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
- package/dist-esm/src/generated/service/operations/dataSources.js +1 -4
- package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
- package/dist-esm/src/generated/service/operations/index.js +0 -1
- package/dist-esm/src/generated/service/operations/index.js.map +1 -1
- package/dist-esm/src/generated/service/operations/indexers.js +1 -29
- package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
- package/dist-esm/src/generated/service/operations/skillsets.js +1 -30
- package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/index.js +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
- package/dist-esm/src/generated/service/searchServiceClient.js +29 -4
- package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
- package/dist-esm/src/index.js +2 -3
- package/dist-esm/src/index.js.map +1 -1
- package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
- package/dist-esm/src/indexModels.js.map +1 -1
- package/dist-esm/src/searchClient.js +29 -26
- package/dist-esm/src/searchClient.js.map +1 -1
- package/dist-esm/src/searchIndexClient.js +11 -174
- package/dist-esm/src/searchIndexClient.js.map +1 -1
- package/dist-esm/src/searchIndexerClient.js +3 -49
- package/dist-esm/src/searchIndexerClient.js.map +1 -1
- package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
- package/dist-esm/src/serviceModels.js.map +1 -1
- package/dist-esm/src/serviceUtils.js +31 -99
- package/dist-esm/src/serviceUtils.js.map +1 -1
- package/package.json +7 -7
- package/types/search-documents.d.ts +735 -2682
- package/dist-esm/src/generated/service/operations/aliases.js +0 -160
- package/dist-esm/src/generated/service/operations/aliases.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/aliases.js +0 -9
- package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +0 -1
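The headline change in this GA release is that the generated clients now target the stable 2023-11-01 service API version, and preview-only surface area is gone (for example, the index alias operations in the removed aliases.js files above). The hunks below appear to come from package/dist-esm/src/generated/service/models/index.js, the generated service-model enums. As a point of reference, a minimal sketch of constructing the 12.0.0 clients; the endpoint, key, index name, and document shape are placeholders, not part of this diff:

```ts
import {
  AzureKeyCredential,
  SearchClient,
  SearchIndexClient,
} from "@azure/search-documents";

// Placeholders; substitute your own service endpoint and API key.
const endpoint = "https://<service-name>.search.windows.net";
const credential = new AzureKeyCredential("<api-key>");

// Queries and document operations against a single index.
const searchClient = new SearchClient<{ id: string; content: string }>(
  endpoint,
  "<index-name>",
  credential
);

// Index management; the preview-only alias operations are no longer exposed.
const indexClient = new SearchIndexClient(endpoint, credential);
```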
@@ -5,12 +5,12 @@
  * Code generated by Microsoft (R) AutoRest Code Generator.
  * Changes may cause incorrect behavior and will be lost if the code is regenerated.
  */
-/** Known values of {@link
-export var
-(function (
-    /** Api Version '2023-
-
-})(
+/** Known values of {@link ApiVersion20231101} that the service accepts. */
+export var KnownApiVersion20231101;
+(function (KnownApiVersion20231101) {
+    /** Api Version '2023-11-01' */
+    KnownApiVersion20231101["TwoThousandTwentyThree1101"] = "2023-11-01";
+})(KnownApiVersion20231101 || (KnownApiVersion20231101 = {}));
 /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
 export var KnownSearchIndexerDataSourceType;
 (function (KnownSearchIndexerDataSourceType) {
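The `export var` plus IIFE shape above is what the TypeScript compiler emits for a string enum, so the generated source behind this JS is presumably along these lines (a sketch inferred from the emitted output, not part of the diff):

```ts
/** Known values of {@link ApiVersion20231101} that the service accepts. */
export enum KnownApiVersion20231101 {
  /** Api Version '2023-11-01' */
  TwoThousandTwentyThree1101 = "2023-11-01",
}
```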
@@ -38,9 +38,9 @@ export var KnownBlobIndexerParsingMode;
     KnownBlobIndexerParsingMode["DelimitedText"] = "delimitedText";
     /** Set to json to extract structured content from JSON files. */
     KnownBlobIndexerParsingMode["Json"] = "json";
-    /** Set to jsonArray to extract individual elements of a JSON array as separate documents
+    /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */
     KnownBlobIndexerParsingMode["JsonArray"] = "jsonArray";
-    /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents
+    /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */
     KnownBlobIndexerParsingMode["JsonLines"] = "jsonLines";
 })(KnownBlobIndexerParsingMode || (KnownBlobIndexerParsingMode = {}));
 /** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */
@@ -74,33 +74,11 @@ export var KnownBlobIndexerPDFTextRotationAlgorithm;
 /** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */
 export var KnownIndexerExecutionEnvironment;
 (function (KnownIndexerExecutionEnvironment) {
-    /** Indicates that
+    /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
     KnownIndexerExecutionEnvironment["Standard"] = "standard";
     /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
     KnownIndexerExecutionEnvironment["Private"] = "private";
 })(KnownIndexerExecutionEnvironment || (KnownIndexerExecutionEnvironment = {}));
-/** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
-export var KnownIndexerExecutionStatusDetail;
-(function (KnownIndexerExecutionStatusDetail) {
-    /** Indicates that the reset that occurred was for a call to ResetDocs. */
-    KnownIndexerExecutionStatusDetail["ResetDocs"] = "resetDocs";
-})(KnownIndexerExecutionStatusDetail || (KnownIndexerExecutionStatusDetail = {}));
-/** Known values of {@link IndexingMode} that the service accepts. */
-export var KnownIndexingMode;
-(function (KnownIndexingMode) {
-    /** The indexer is indexing all documents in the datasource. */
-    KnownIndexingMode["IndexingAllDocs"] = "indexingAllDocs";
-    /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
-    KnownIndexingMode["IndexingResetDocs"] = "indexingResetDocs";
-})(KnownIndexingMode || (KnownIndexingMode = {}));
-/** Known values of {@link IndexProjectionMode} that the service accepts. */
-export var KnownIndexProjectionMode;
-(function (KnownIndexProjectionMode) {
-    /** The source document will be skipped from writing into the indexer's target index. */
-    KnownIndexProjectionMode["SkipIndexingParentDocuments"] = "skipIndexingParentDocuments";
-    /** The source document will be written into the indexer's target index. This is the default pattern. */
-    KnownIndexProjectionMode["IncludeIndexingParentDocuments"] = "includeIndexingParentDocuments";
-})(KnownIndexProjectionMode || (KnownIndexProjectionMode = {}));
 /** Known values of {@link SearchFieldDataType} that the service accepts. */
 export var KnownSearchFieldDataType;
 (function (KnownSearchFieldDataType) {
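The parsing modes and execution environments from the two hunks above surface through an indexer's parameters, while the preview-only status-detail, indexing-mode, and index-projection enums were dropped for GA. A sketch of how the surviving values might be set, assuming the 12.0.0 typings (all names here are placeholders):

```ts
import type { SearchIndexer } from "@azure/search-documents";

const indexer: SearchIndexer = {
  name: "my-indexer",
  dataSourceName: "my-datasource",
  targetIndexName: "my-index",
  parameters: {
    configuration: {
      // One of the KnownBlobIndexerParsingMode values above.
      parsingMode: "jsonLines",
      // "standard" (the default) or "private", per the GA enum above.
      executionEnvironment: "private",
    },
  },
};
```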
@@ -114,7 +92,7 @@ export var KnownSearchFieldDataType;
     KnownSearchFieldDataType["Double"] = "Edm.Double";
     /** Indicates that a field contains a Boolean value (true or false). */
     KnownSearchFieldDataType["Boolean"] = "Edm.Boolean";
-    /** Indicates that a field contains a date
+    /** Indicates that a field contains a date\/time value, including timezone information. */
     KnownSearchFieldDataType["DateTimeOffset"] = "Edm.DateTimeOffset";
     /** Indicates that a field contains a geo-location in terms of longitude and latitude. */
     KnownSearchFieldDataType["GeographyPoint"] = "Edm.GeographyPoint";
@@ -300,132 +278,35 @@ export var KnownLexicalAnalyzerName;
     KnownLexicalAnalyzerName["ViMicrosoft"] = "vi.microsoft";
     /** Standard Lucene analyzer. */
     KnownLexicalAnalyzerName["StandardLucene"] = "standard.lucene";
-    /** Standard ASCII Folding Lucene analyzer. See https
+    /** Standard ASCII Folding Lucene analyzer. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
     KnownLexicalAnalyzerName["StandardAsciiFoldingLucene"] = "standardasciifolding.lucene";
-    /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http
+    /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
     KnownLexicalAnalyzerName["Keyword"] = "keyword";
-    /** Flexibly separates text into terms via a regular expression pattern. See http
+    /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
     KnownLexicalAnalyzerName["Pattern"] = "pattern";
-    /** Divides text at non-letters and converts them to lower case. See http
+    /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
     KnownLexicalAnalyzerName["Simple"] = "simple";
-    /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http
+    /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
     KnownLexicalAnalyzerName["Stop"] = "stop";
-    /** An analyzer that uses the whitespace tokenizer. See http
+    /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
     KnownLexicalAnalyzerName["Whitespace"] = "whitespace";
 })(KnownLexicalAnalyzerName || (KnownLexicalAnalyzerName = {}));
-/** Known values of {@link LexicalNormalizerName} that the service accepts. */
-export var KnownLexicalNormalizerName;
-(function (KnownLexicalNormalizerName) {
-    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
-    KnownLexicalNormalizerName["AsciiFolding"] = "asciifolding";
-    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
-    KnownLexicalNormalizerName["Elision"] = "elision";
-    /** Normalizes token text to lowercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
-    KnownLexicalNormalizerName["Lowercase"] = "lowercase";
-    /** Standard normalizer, which consists of lowercase and asciifolding. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
-    KnownLexicalNormalizerName["Standard"] = "standard";
-    /** Normalizes token text to uppercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
-    KnownLexicalNormalizerName["Uppercase"] = "uppercase";
-})(KnownLexicalNormalizerName || (KnownLexicalNormalizerName = {}));
 /** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */
 export var KnownVectorSearchAlgorithmKind;
 (function (KnownVectorSearchAlgorithmKind) {
-    /**
+    /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */
     KnownVectorSearchAlgorithmKind["Hnsw"] = "hnsw";
     /** Exhaustive KNN algorithm which will perform brute-force search. */
     KnownVectorSearchAlgorithmKind["ExhaustiveKnn"] = "exhaustiveKnn";
 })(KnownVectorSearchAlgorithmKind || (KnownVectorSearchAlgorithmKind = {}));
-/** Known values of {@link VectorSearchVectorizerKind} that the service accepts. */
-export var KnownVectorSearchVectorizerKind;
-(function (KnownVectorSearchVectorizerKind) {
-    /** Generate embeddings using an Azure Open AI service at query time. */
-    KnownVectorSearchVectorizerKind["AzureOpenAI"] = "azureOpenAI";
-    /** Generate embeddings using a custom web endpoint at query time. */
-    KnownVectorSearchVectorizerKind["CustomWebApi"] = "customWebApi";
-})(KnownVectorSearchVectorizerKind || (KnownVectorSearchVectorizerKind = {}));
-/** Known values of {@link TokenFilterName} that the service accepts. */
-export var KnownTokenFilterName;
-(function (KnownTokenFilterName) {
-    /** A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html */
-    KnownTokenFilterName["ArabicNormalization"] = "arabic_normalization";
-    /** Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html */
-    KnownTokenFilterName["Apostrophe"] = "apostrophe";
-    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
-    KnownTokenFilterName["AsciiFolding"] = "asciifolding";
-    /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html */
-    KnownTokenFilterName["CjkBigram"] = "cjk_bigram";
-    /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html */
-    KnownTokenFilterName["CjkWidth"] = "cjk_width";
-    /** Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html */
-    KnownTokenFilterName["Classic"] = "classic";
-    /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html */
-    KnownTokenFilterName["CommonGram"] = "common_grams";
-    /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html */
-    KnownTokenFilterName["EdgeNGram"] = "edgeNGram_v2";
-    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
-    KnownTokenFilterName["Elision"] = "elision";
-    /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html */
-    KnownTokenFilterName["GermanNormalization"] = "german_normalization";
-    /** Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html */
-    KnownTokenFilterName["HindiNormalization"] = "hindi_normalization";
-    /** Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html */
-    KnownTokenFilterName["IndicNormalization"] = "indic_normalization";
-    /** Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html */
-    KnownTokenFilterName["KeywordRepeat"] = "keyword_repeat";
-    /** A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html */
-    KnownTokenFilterName["KStem"] = "kstem";
-    /** Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html */
-    KnownTokenFilterName["Length"] = "length";
-    /** Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html */
-    KnownTokenFilterName["Limit"] = "limit";
-    /** Normalizes token text to lower case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
-    KnownTokenFilterName["Lowercase"] = "lowercase";
-    /** Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html */
-    KnownTokenFilterName["NGram"] = "nGram_v2";
-    /** Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html */
-    KnownTokenFilterName["PersianNormalization"] = "persian_normalization";
-    /** Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html */
-    KnownTokenFilterName["Phonetic"] = "phonetic";
-    /** Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer */
-    KnownTokenFilterName["PorterStem"] = "porter_stem";
-    /** Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
-    KnownTokenFilterName["Reverse"] = "reverse";
-    /** Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html */
-    KnownTokenFilterName["ScandinavianNormalization"] = "scandinavian_normalization";
-    /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html */
-    KnownTokenFilterName["ScandinavianFoldingNormalization"] = "scandinavian_folding";
-    /** Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html */
-    KnownTokenFilterName["Shingle"] = "shingle";
-    /** A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html */
-    KnownTokenFilterName["Snowball"] = "snowball";
-    /** Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html */
-    KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
-    /** Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters */
-    KnownTokenFilterName["Stemmer"] = "stemmer";
-    /** Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html */
-    KnownTokenFilterName["Stopwords"] = "stopwords";
-    /** Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html */
-    KnownTokenFilterName["Trim"] = "trim";
-    /** Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html */
-    KnownTokenFilterName["Truncate"] = "truncate";
-    /** Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html */
-    KnownTokenFilterName["Unique"] = "unique";
-    /** Normalizes token text to upper case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
-    KnownTokenFilterName["Uppercase"] = "uppercase";
-    /** Splits words into subwords and performs optional transformations on subword groups. */
-    KnownTokenFilterName["WordDelimiter"] = "word_delimiter";
-})(KnownTokenFilterName || (KnownTokenFilterName = {}));
-/** Known values of {@link CharFilterName} that the service accepts. */
-export var KnownCharFilterName;
-(function (KnownCharFilterName) {
-    /** A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html */
-    KnownCharFilterName["HtmlStrip"] = "html_strip";
-})(KnownCharFilterName || (KnownCharFilterName = {}));
 /** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */
 export var KnownVectorSearchAlgorithmMetric;
 (function (KnownVectorSearchAlgorithmMetric) {
+    /** Cosine */
     KnownVectorSearchAlgorithmMetric["Cosine"] = "cosine";
+    /** Euclidean */
     KnownVectorSearchAlgorithmMetric["Euclidean"] = "euclidean";
+    /** DotProduct */
     KnownVectorSearchAlgorithmMetric["DotProduct"] = "dotProduct";
 })(KnownVectorSearchAlgorithmMetric || (KnownVectorSearchAlgorithmMetric = {}));
 /** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */
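The algorithm kinds and metrics that remain ("hnsw"/"exhaustiveKnn", "cosine"/"euclidean"/"dotProduct") are referenced from an index's vectorSearch section; the preview vectorizer kinds removed above have no GA equivalent in this release. A hedged sketch assuming the 12.0.0 typings (the index name, field names, and dimension count are placeholders):

```ts
import type { SearchIndex } from "@azure/search-documents";

const index: SearchIndex = {
  name: "docs-vector",
  fields: [
    { name: "id", type: "Edm.String", key: true },
    {
      name: "embedding",
      type: "Collection(Edm.Single)",
      searchable: true,
      vectorSearchDimensions: 1536,
      vectorSearchProfileName: "hnsw-profile",
    },
  ],
  vectorSearch: {
    // KnownVectorSearchAlgorithmKind.Hnsw with KnownVectorSearchAlgorithmMetric.Cosine.
    algorithms: [
      { name: "hnsw-config", kind: "hnsw", parameters: { metric: "cosine" } },
    ],
    profiles: [
      { name: "hnsw-profile", algorithmConfigurationName: "hnsw-config" },
    ],
  },
};
```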
@@ -808,18 +689,6 @@ export var KnownOcrSkillLanguage;
     /** Unknown (All) */
     KnownOcrSkillLanguage["Unk"] = "unk";
 })(KnownOcrSkillLanguage || (KnownOcrSkillLanguage = {}));
-/** Known values of {@link LineEnding} that the service accepts. */
-export var KnownLineEnding;
-(function (KnownLineEnding) {
-    /** Lines are separated by a single space character. */
-    KnownLineEnding["Space"] = "space";
-    /** Lines are separated by a carriage return ('\r') character. */
-    KnownLineEnding["CarriageReturn"] = "carriageReturn";
-    /** Lines are separated by a single line feed ('\n') character. */
-    KnownLineEnding["LineFeed"] = "lineFeed";
-    /** Lines are separated by a carriage return and a line feed ('\r\n') character. */
-    KnownLineEnding["CarriageReturnLineFeed"] = "carriageReturnLineFeed";
-})(KnownLineEnding || (KnownLineEnding = {}));
 /** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. */
 export var KnownImageAnalysisSkillLanguage;
 (function (KnownImageAnalysisSkillLanguage) {
@@ -1067,6 +936,12 @@ export var KnownPIIDetectionSkillMaskingMode;
 /** Known values of {@link SplitSkillLanguage} that the service accepts. */
 export var KnownSplitSkillLanguage;
 (function (KnownSplitSkillLanguage) {
+    /** Amharic */
+    KnownSplitSkillLanguage["Am"] = "am";
+    /** Bosnian */
+    KnownSplitSkillLanguage["Bs"] = "bs";
+    /** Czech */
+    KnownSplitSkillLanguage["Cs"] = "cs";
     /** Danish */
     KnownSplitSkillLanguage["Da"] = "da";
     /** German */
@@ -1075,16 +950,58 @@ export var KnownSplitSkillLanguage;
     KnownSplitSkillLanguage["En"] = "en";
     /** Spanish */
     KnownSplitSkillLanguage["Es"] = "es";
+    /** Estonian */
+    KnownSplitSkillLanguage["Et"] = "et";
     /** Finnish */
     KnownSplitSkillLanguage["Fi"] = "fi";
     /** French */
     KnownSplitSkillLanguage["Fr"] = "fr";
+    /** Hebrew */
+    KnownSplitSkillLanguage["He"] = "he";
+    /** Hindi */
+    KnownSplitSkillLanguage["Hi"] = "hi";
+    /** Croatian */
+    KnownSplitSkillLanguage["Hr"] = "hr";
+    /** Hungarian */
+    KnownSplitSkillLanguage["Hu"] = "hu";
+    /** Indonesian */
+    KnownSplitSkillLanguage["Id"] = "id";
+    /** Icelandic */
+    KnownSplitSkillLanguage["Is"] = "is";
     /** Italian */
     KnownSplitSkillLanguage["It"] = "it";
+    /** Japanese */
+    KnownSplitSkillLanguage["Ja"] = "ja";
     /** Korean */
     KnownSplitSkillLanguage["Ko"] = "ko";
-    /**
+    /** Latvian */
+    KnownSplitSkillLanguage["Lv"] = "lv";
+    /** Norwegian */
+    KnownSplitSkillLanguage["Nb"] = "nb";
+    /** Dutch */
+    KnownSplitSkillLanguage["Nl"] = "nl";
+    /** Polish */
+    KnownSplitSkillLanguage["Pl"] = "pl";
+    /** Portuguese (Portugal) */
     KnownSplitSkillLanguage["Pt"] = "pt";
+    /** Portuguese (Brazil) */
+    KnownSplitSkillLanguage["PtBr"] = "pt-br";
+    /** Russian */
+    KnownSplitSkillLanguage["Ru"] = "ru";
+    /** Slovak */
+    KnownSplitSkillLanguage["Sk"] = "sk";
+    /** Slovenian */
+    KnownSplitSkillLanguage["Sl"] = "sl";
+    /** Serbian */
+    KnownSplitSkillLanguage["Sr"] = "sr";
+    /** Swedish */
+    KnownSplitSkillLanguage["Sv"] = "sv";
+    /** Turkish */
+    KnownSplitSkillLanguage["Tr"] = "tr";
+    /** Urdu */
+    KnownSplitSkillLanguage["Ur"] = "ur";
+    /** Chinese (Simplified) */
+    KnownSplitSkillLanguage["Zh"] = "zh";
 })(KnownSplitSkillLanguage || (KnownSplitSkillLanguage = {}));
 /** Known values of {@link TextSplitMode} that the service accepts. */
 export var KnownTextSplitMode;
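A sketch of using one of the newly added split-skill languages (Czech, "cs") when creating a skillset; the skillset name, connection values, and annotation paths are placeholders, assuming the 12.0.0 SearchIndexerClient surface:

```ts
import {
  AzureKeyCredential,
  SearchIndexerClient,
} from "@azure/search-documents";

async function createSplitSkillset(): Promise<void> {
  const client = new SearchIndexerClient(
    "https://<service-name>.search.windows.net",
    new AzureKeyCredential("<api-key>")
  );
  await client.createSkillset({
    name: "split-skillset",
    skills: [
      {
        odatatype: "#Microsoft.Skills.Text.SplitSkill",
        defaultLanguageCode: "cs", // KnownSplitSkillLanguage.Cs, added above
        textSplitMode: "pages",
        maximumPageLength: 4000,
        inputs: [{ name: "text", source: "/document/content" }],
        outputs: [{ name: "textItems", targetName: "pages" }],
      },
    ],
  });
}
```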
@@ -1183,10 +1100,6 @@ export var KnownTextTranslationSkillLanguage;
     KnownTextTranslationSkillLanguage["Sw"] = "sw";
     /** Klingon */
     KnownTextTranslationSkillLanguage["Tlh"] = "tlh";
-    /** Klingon (Latin script) */
-    KnownTextTranslationSkillLanguage["TlhLatn"] = "tlh-Latn";
-    /** Klingon (Klingon script) */
-    KnownTextTranslationSkillLanguage["TlhPiqd"] = "tlh-Piqd";
     /** Korean */
     KnownTextTranslationSkillLanguage["Ko"] = "ko";
     /** Latvian */
@@ -1207,10 +1120,6 @@ export var KnownTextTranslationSkillLanguage;
     KnownTextTranslationSkillLanguage["Pl"] = "pl";
     /** Portuguese */
     KnownTextTranslationSkillLanguage["Pt"] = "pt";
-    /** Portuguese (Brazil) */
-    KnownTextTranslationSkillLanguage["PtBr"] = "pt-br";
-    /** Portuguese (Portugal) */
-    KnownTextTranslationSkillLanguage["PtPT"] = "pt-PT";
     /** Queretaro Otomi */
     KnownTextTranslationSkillLanguage["Otq"] = "otq";
     /** Romanian */
@@ -1253,47 +1162,115 @@ export var KnownTextTranslationSkillLanguage;
     KnownTextTranslationSkillLanguage["Cy"] = "cy";
     /** Yucatec Maya */
     KnownTextTranslationSkillLanguage["Yua"] = "yua";
-    /** Irish */
-    KnownTextTranslationSkillLanguage["Ga"] = "ga";
-    /** Kannada */
-    KnownTextTranslationSkillLanguage["Kn"] = "kn";
-    /** Maori */
-    KnownTextTranslationSkillLanguage["Mi"] = "mi";
-    /** Malayalam */
-    KnownTextTranslationSkillLanguage["Ml"] = "ml";
-    /** Punjabi */
-    KnownTextTranslationSkillLanguage["Pa"] = "pa";
 })(KnownTextTranslationSkillLanguage || (KnownTextTranslationSkillLanguage = {}));
 /** Known values of {@link LexicalTokenizerName} that the service accepts. */
 export var KnownLexicalTokenizerName;
 (function (KnownLexicalTokenizerName) {
-    /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http
+    /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
     KnownLexicalTokenizerName["Classic"] = "classic";
-    /** Tokenizes the input from an edge into n-grams of the given size(s). See https
+    /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
     KnownLexicalTokenizerName["EdgeNGram"] = "edgeNGram";
-    /** Emits the entire input as a single token. See http
+    /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
     KnownLexicalTokenizerName["Keyword"] = "keyword_v2";
-    /** Divides text at non-letters. See http
+    /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
     KnownLexicalTokenizerName["Letter"] = "letter";
-    /** Divides text at non-letters and converts them to lower case. See http
+    /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
     KnownLexicalTokenizerName["Lowercase"] = "lowercase";
     /** Divides text using language-specific rules. */
     KnownLexicalTokenizerName["MicrosoftLanguageTokenizer"] = "microsoft_language_tokenizer";
     /** Divides text using language-specific rules and reduces words to their base forms. */
     KnownLexicalTokenizerName["MicrosoftLanguageStemmingTokenizer"] = "microsoft_language_stemming_tokenizer";
-    /** Tokenizes the input into n-grams of the given size(s). See http
+    /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
     KnownLexicalTokenizerName["NGram"] = "nGram";
-    /** Tokenizer for path-like hierarchies. See http
+    /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
     KnownLexicalTokenizerName["PathHierarchy"] = "path_hierarchy_v2";
-    /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http
+    /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
     KnownLexicalTokenizerName["Pattern"] = "pattern";
-    /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http
+    /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
     KnownLexicalTokenizerName["Standard"] = "standard_v2";
-    /** Tokenizes urls and emails as one token. See http
+    /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
     KnownLexicalTokenizerName["UaxUrlEmail"] = "uax_url_email";
-    /** Divides text at whitespace. See http
+    /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
     KnownLexicalTokenizerName["Whitespace"] = "whitespace";
 })(KnownLexicalTokenizerName || (KnownLexicalTokenizerName = {}));
+/** Known values of {@link TokenFilterName} that the service accepts. */
+export var KnownTokenFilterName;
+(function (KnownTokenFilterName) {
+    /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
+    KnownTokenFilterName["ArabicNormalization"] = "arabic_normalization";
+    /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
+    KnownTokenFilterName["Apostrophe"] = "apostrophe";
+    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
+    KnownTokenFilterName["AsciiFolding"] = "asciifolding";
+    /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
+    KnownTokenFilterName["CjkBigram"] = "cjk_bigram";
+    /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
+    KnownTokenFilterName["CjkWidth"] = "cjk_width";
+    /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
+    KnownTokenFilterName["Classic"] = "classic";
+    /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
+    KnownTokenFilterName["CommonGram"] = "common_grams";
+    /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
+    KnownTokenFilterName["EdgeNGram"] = "edgeNGram_v2";
+    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
+    KnownTokenFilterName["Elision"] = "elision";
+    /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
+    KnownTokenFilterName["GermanNormalization"] = "german_normalization";
+    /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
+    KnownTokenFilterName["HindiNormalization"] = "hindi_normalization";
+    /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
+    KnownTokenFilterName["IndicNormalization"] = "indic_normalization";
+    /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
+    KnownTokenFilterName["KeywordRepeat"] = "keyword_repeat";
+    /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
+    KnownTokenFilterName["KStem"] = "kstem";
+    /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
+    KnownTokenFilterName["Length"] = "length";
+    /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
+    KnownTokenFilterName["Limit"] = "limit";
+    /** Normalizes token text to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.htm */
+    KnownTokenFilterName["Lowercase"] = "lowercase";
+    /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
+    KnownTokenFilterName["NGram"] = "nGram_v2";
+    /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
+    KnownTokenFilterName["PersianNormalization"] = "persian_normalization";
+    /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
+    KnownTokenFilterName["Phonetic"] = "phonetic";
+    /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
+    KnownTokenFilterName["PorterStem"] = "porter_stem";
+    /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
+    KnownTokenFilterName["Reverse"] = "reverse";
+    /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
+    KnownTokenFilterName["ScandinavianNormalization"] = "scandinavian_normalization";
+    /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
+    KnownTokenFilterName["ScandinavianFoldingNormalization"] = "scandinavian_folding";
+    /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
+    KnownTokenFilterName["Shingle"] = "shingle";
+    /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
+    KnownTokenFilterName["Snowball"] = "snowball";
+    /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
+    KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
+    /** Language specific stemming filter. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
+    KnownTokenFilterName["Stemmer"] = "stemmer";
+    /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
+    KnownTokenFilterName["Stopwords"] = "stopwords";
+    /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
+    KnownTokenFilterName["Trim"] = "trim";
+    /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
+    KnownTokenFilterName["Truncate"] = "truncate";
+    /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
+    KnownTokenFilterName["Unique"] = "unique";
+    /** Normalizes token text to upper case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
+    KnownTokenFilterName["Uppercase"] = "uppercase";
+    /** Splits words into subwords and performs optional transformations on subword groups. */
+    KnownTokenFilterName["WordDelimiter"] = "word_delimiter";
+})(KnownTokenFilterName || (KnownTokenFilterName = {}));
+/** Known values of {@link CharFilterName} that the service accepts. */
+export var KnownCharFilterName;
+(function (KnownCharFilterName) {
+    /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
+    KnownCharFilterName["HtmlStrip"] = "html_strip";
+})(KnownCharFilterName || (KnownCharFilterName = {}));
 /** Known values of {@link RegexFlags} that the service accepts. */
 export var KnownRegexFlags;
 (function (KnownRegexFlags) {
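The tokenizer, token filter, and char filter names in these enums (relocated rather than removed in this hunk) are the values accepted when wiring up a custom analyzer on an index. A sketch assuming the 12.0.0 typings; the index and analyzer names are placeholders:

```ts
import type { SearchIndex } from "@azure/search-documents";

const index: SearchIndex = {
  name: "docs-custom-analyzer",
  fields: [
    { name: "id", type: "Edm.String", key: true },
    {
      name: "content",
      type: "Edm.String",
      searchable: true,
      analyzerName: "my-analyzer",
    },
  ],
  analyzers: [
    {
      odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
      name: "my-analyzer",
      tokenizerName: "standard_v2", // KnownLexicalTokenizerName.Standard
      tokenFilters: ["lowercase", "asciifolding"], // KnownTokenFilterName values
      charFilters: ["html_strip"], // KnownCharFilterName.HtmlStrip
    },
  ],
};
```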