@azure/search-documents 12.1.0 → 12.2.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/index.js +2710 -454
- package/dist/index.js.map +1 -1
- package/dist-esm/src/base64.browser.js +1 -1
- package/dist-esm/src/base64.browser.js.map +1 -1
- package/dist-esm/src/base64.js +1 -1
- package/dist-esm/src/base64.js.map +1 -1
- package/dist-esm/src/errorModels.js +1 -1
- package/dist-esm/src/errorModels.js.map +1 -1
- package/dist-esm/src/generated/data/models/index.js +220 -6
- package/dist-esm/src/generated/data/models/index.js.map +1 -1
- package/dist-esm/src/generated/data/models/mappers.js +481 -0
- package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/data/models/parameters.js +51 -0
- package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
- package/dist-esm/src/generated/data/operations/documents.js +5 -0
- package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
- package/dist-esm/src/generated/data/searchClient.js +1 -1
- package/dist-esm/src/generated/data/searchClient.js.map +1 -1
- package/dist-esm/src/generated/service/models/index.js +210 -84
- package/dist-esm/src/generated/service/models/index.js.map +1 -1
- package/dist-esm/src/generated/service/models/mappers.js +815 -77
- package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/service/models/parameters.js +51 -1
- package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
- package/dist-esm/src/generated/service/operations/aliases.js +160 -0
- package/dist-esm/src/generated/service/operations/aliases.js.map +1 -0
- package/dist-esm/src/generated/service/operations/dataSources.js +4 -1
- package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
- package/dist-esm/src/generated/service/operations/index.js +1 -0
- package/dist-esm/src/generated/service/operations/index.js.map +1 -1
- package/dist-esm/src/generated/service/operations/indexers.js +29 -1
- package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
- package/dist-esm/src/generated/service/operations/skillsets.js +30 -1
- package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/aliases.js +9 -0
- package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +1 -0
- package/dist-esm/src/generated/service/operationsInterfaces/index.js +1 -0
- package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
- package/dist-esm/src/generated/service/searchServiceClient.js +3 -2
- package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
- package/dist-esm/src/geographyPoint.js +1 -1
- package/dist-esm/src/geographyPoint.js.map +1 -1
- package/dist-esm/src/index.js +4 -4
- package/dist-esm/src/index.js.map +1 -1
- package/dist-esm/src/indexDocumentsBatch.js +1 -1
- package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
- package/dist-esm/src/indexModels.js +1 -1
- package/dist-esm/src/indexModels.js.map +1 -1
- package/dist-esm/src/logger.js +1 -1
- package/dist-esm/src/logger.js.map +1 -1
- package/dist-esm/src/odata.js +1 -1
- package/dist-esm/src/odata.js.map +1 -1
- package/dist-esm/src/odataMetadataPolicy.js +1 -1
- package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
- package/dist-esm/src/searchApiKeyCredentialPolicy.js +1 -1
- package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
- package/dist-esm/src/searchAudience.js +1 -1
- package/dist-esm/src/searchAudience.js.map +1 -1
- package/dist-esm/src/searchClient.js +52 -8
- package/dist-esm/src/searchClient.js.map +1 -1
- package/dist-esm/src/searchIndexClient.js +158 -6
- package/dist-esm/src/searchIndexClient.js.map +1 -1
- package/dist-esm/src/searchIndexerClient.js +53 -2
- package/dist-esm/src/searchIndexerClient.js.map +1 -1
- package/dist-esm/src/searchIndexingBufferedSender.js +1 -1
- package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
- package/dist-esm/src/serialization.js +1 -1
- package/dist-esm/src/serialization.js.map +1 -1
- package/dist-esm/src/serviceModels.js +1 -1
- package/dist-esm/src/serviceModels.js.map +1 -1
- package/dist-esm/src/serviceUtils.js +112 -26
- package/dist-esm/src/serviceUtils.js.map +1 -1
- package/dist-esm/src/synonymMapHelper.browser.js +1 -1
- package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
- package/dist-esm/src/synonymMapHelper.js +1 -1
- package/dist-esm/src/synonymMapHelper.js.map +1 -1
- package/dist-esm/src/tracing.js +1 -1
- package/dist-esm/src/tracing.js.map +1 -1
- package/dist-esm/src/walk.js +1 -1
- package/dist-esm/src/walk.js.map +1 -1
- package/package.json +13 -16
- package/types/search-documents.d.ts +1774 -108
@@ -5,12 +5,12 @@
  * Code generated by Microsoft (R) AutoRest Code Generator.
  * Changes may cause incorrect behavior and will be lost if the code is regenerated.
  */
-/** Known values of {@link
-export var
-(function (
-    /** Api Version '2024-
-
-})(
+/** Known values of {@link ApiVersion20241101Preview} that the service accepts. */
+export var KnownApiVersion20241101Preview;
+(function (KnownApiVersion20241101Preview) {
+    /** Api Version '2024-11-01-preview' */
+    KnownApiVersion20241101Preview["TwoThousandTwentyFour1101Preview"] = "2024-11-01-preview";
+})(KnownApiVersion20241101Preview || (KnownApiVersion20241101Preview = {}));
 /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
 export var KnownSearchIndexerDataSourceType;
 (function (KnownSearchIndexerDataSourceType) {
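The hunk above swaps the generated API-version enum for the new `2024-11-01-preview` service version (the removed lines are truncated in this diff view). A minimal sketch of pinning a client to the new preview version — the endpoint and key are placeholders, and `serviceVersion` is the 12.x option name (older releases used `apiVersion`):

```ts
import { AzureKeyCredential, SearchIndexClient } from "@azure/search-documents";

// Sketch: pin every request to the new preview API surface.
const indexClient = new SearchIndexClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<admin-key>"),
  { serviceVersion: "2024-11-01-preview" }
);
```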
@@ -26,6 +26,8 @@ export var KnownSearchIndexerDataSourceType;
     KnownSearchIndexerDataSourceType["MySql"] = "mysql";
     /** Indicates an ADLS Gen2 datasource. */
     KnownSearchIndexerDataSourceType["AdlsGen2"] = "adlsgen2";
+    /** Indicates a Microsoft Fabric OneLake datasource. */
+    KnownSearchIndexerDataSourceType["OneLake"] = "onelake";
 })(KnownSearchIndexerDataSourceType || (KnownSearchIndexerDataSourceType = {}));
 /** Known values of {@link BlobIndexerParsingMode} that the service accepts. */
 export var KnownBlobIndexerParsingMode;
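The new `onelake` data-source type plugs into the existing data-source APIs. A minimal sketch using `SearchIndexerClient.createDataSourceConnection` — the connection-string and container shape for a Fabric OneLake source are assumptions here:

```ts
import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

const indexerClient = new SearchIndexerClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<admin-key>")
);

// Sketch: only the "onelake" type value comes from this diff; the
// connection details for a OneLake lakehouse are placeholders.
await indexerClient.createDataSourceConnection({
  name: "my-onelake-datasource",
  type: "onelake",
  connectionString: "<onelake-connection-string>",
  container: { name: "<lakehouse-or-path>" },
});
```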
@@ -42,7 +44,33 @@ export var KnownBlobIndexerParsingMode;
     KnownBlobIndexerParsingMode["JsonArray"] = "jsonArray";
     /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */
     KnownBlobIndexerParsingMode["JsonLines"] = "jsonLines";
+    /** Set to markdown to extract content from markdown files. */
+    KnownBlobIndexerParsingMode["Markdown"] = "markdown";
 })(KnownBlobIndexerParsingMode || (KnownBlobIndexerParsingMode = {}));
+/** Known values of {@link MarkdownParsingSubmode} that the service accepts. */
+export var KnownMarkdownParsingSubmode;
+(function (KnownMarkdownParsingSubmode) {
+    /** Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. */
+    KnownMarkdownParsingSubmode["OneToMany"] = "oneToMany";
+    /** Indicates that each markdown file will be parsed into a single search document. */
+    KnownMarkdownParsingSubmode["OneToOne"] = "oneToOne";
+})(KnownMarkdownParsingSubmode || (KnownMarkdownParsingSubmode = {}));
+/** Known values of {@link MarkdownHeaderDepth} that the service accepts. */
+export var KnownMarkdownHeaderDepth;
+(function (KnownMarkdownHeaderDepth) {
+    /** Indicates that headers up to a level of h1 will be considered while grouping markdown content. */
+    KnownMarkdownHeaderDepth["H1"] = "h1";
+    /** Indicates that headers up to a level of h2 will be considered while grouping markdown content. */
+    KnownMarkdownHeaderDepth["H2"] = "h2";
+    /** Indicates that headers up to a level of h3 will be considered while grouping markdown content. */
+    KnownMarkdownHeaderDepth["H3"] = "h3";
+    /** Indicates that headers up to a level of h4 will be considered while grouping markdown content. */
+    KnownMarkdownHeaderDepth["H4"] = "h4";
+    /** Indicates that headers up to a level of h5 will be considered while grouping markdown content. */
+    KnownMarkdownHeaderDepth["H5"] = "h5";
+    /** Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. */
+    KnownMarkdownHeaderDepth["H6"] = "h6";
+})(KnownMarkdownHeaderDepth || (KnownMarkdownHeaderDepth = {}));
 /** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */
 export var KnownBlobIndexerDataToExtract;
 (function (KnownBlobIndexerDataToExtract) {
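These three enums wire into an indexer's parsing configuration. A sketch of an indexer definition using them — the `markdownParsingSubmode` and `markdownHeaderDepth` key names are inferred from the enum names above and should be treated as assumptions:

```ts
import type { SearchIndexer } from "@azure/search-documents";

// Sketch: split each markdown blob into one search document per section,
// grouping content under headers h1 through h3.
const indexer: SearchIndexer = {
  name: "markdown-indexer",
  dataSourceName: "my-blob-datasource",
  targetIndexName: "docs-index",
  parameters: {
    configuration: {
      parsingMode: "markdown",
      markdownParsingSubmode: "oneToMany", // one document per section
      markdownHeaderDepth: "h3",           // deepest header considered
    },
  },
};
```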
@@ -79,6 +107,20 @@ export var KnownIndexerExecutionEnvironment;
     /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
     KnownIndexerExecutionEnvironment["Private"] = "private";
 })(KnownIndexerExecutionEnvironment || (KnownIndexerExecutionEnvironment = {}));
+/** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
+export var KnownIndexerExecutionStatusDetail;
+(function (KnownIndexerExecutionStatusDetail) {
+    /** Indicates that the reset that occurred was for a call to ResetDocs. */
+    KnownIndexerExecutionStatusDetail["ResetDocs"] = "resetDocs";
+})(KnownIndexerExecutionStatusDetail || (KnownIndexerExecutionStatusDetail = {}));
+/** Known values of {@link IndexingMode} that the service accepts. */
+export var KnownIndexingMode;
+(function (KnownIndexingMode) {
+    /** The indexer is indexing all documents in the datasource. */
+    KnownIndexingMode["IndexingAllDocs"] = "indexingAllDocs";
+    /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
+    KnownIndexingMode["IndexingResetDocs"] = "indexingResetDocs";
+})(KnownIndexingMode || (KnownIndexingMode = {}));
 /** Known values of {@link IndexProjectionMode} that the service accepts. */
 export var KnownIndexProjectionMode;
 (function (KnownIndexProjectionMode) {
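Both enums describe values reported on indexer execution status. A sketch of reading them back — the exact property names on the execution-result object (`statusDetail`, `mode`) are assumptions inferred from the enum names:

```ts
import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

const client = new SearchIndexerClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<admin-key>")
);

// Sketch: after a selective reset, recent runs should report the
// resetDocs detail and the indexingResetDocs mode.
const status = await client.getIndexerStatus("markdown-indexer");
for (const run of status.executionHistory) {
  console.log(run.status, (run as any).statusDetail, (run as any).mode);
}
```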
@@ -307,6 +349,20 @@ export var KnownLexicalAnalyzerName;
     /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
     KnownLexicalAnalyzerName["Whitespace"] = "whitespace";
 })(KnownLexicalAnalyzerName || (KnownLexicalAnalyzerName = {}));
+/** Known values of {@link LexicalNormalizerName} that the service accepts. */
+export var KnownLexicalNormalizerName;
+(function (KnownLexicalNormalizerName) {
+    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
+    KnownLexicalNormalizerName["AsciiFolding"] = "asciifolding";
+    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
+    KnownLexicalNormalizerName["Elision"] = "elision";
+    /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
+    KnownLexicalNormalizerName["Lowercase"] = "lowercase";
+    /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
+    KnownLexicalNormalizerName["Standard"] = "standard";
+    /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
+    KnownLexicalNormalizerName["Uppercase"] = "uppercase";
+})(KnownLexicalNormalizerName || (KnownLexicalNormalizerName = {}));
 /** Known values of {@link VectorEncodingFormat} that the service accepts. */
 export var KnownVectorEncodingFormat;
 (function (KnownVectorEncodingFormat) {
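Normalizer names attach to string fields to apply case/accent folding during filtering, faceting, and sorting (distinct from an analyzer, which affects full-text search). A sketch — `normalizerName` is the property name used in earlier preview releases of this package and is an assumption here:

```ts
import type { SearchIndex } from "@azure/search-documents";

// Sketch: "category" filters and facets case-insensitively via the
// built-in lowercase normalizer.
const index: SearchIndex = {
  name: "docs-index",
  fields: [
    { name: "id", type: "Edm.String", key: true },
    {
      name: "category",
      type: "Edm.String",
      filterable: true,
      normalizerName: "lowercase", // a KnownLexicalNormalizerName value
    },
  ],
};
```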
@@ -328,6 +384,10 @@ export var KnownVectorSearchVectorizerKind;
     KnownVectorSearchVectorizerKind["AzureOpenAI"] = "azureOpenAI";
     /** Generate embeddings using a custom web endpoint at query time. */
     KnownVectorSearchVectorizerKind["CustomWebApi"] = "customWebApi";
+    /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */
+    KnownVectorSearchVectorizerKind["AIServicesVision"] = "aiServicesVision";
+    /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog at query time. */
+    KnownVectorSearchVectorizerKind["AML"] = "aml";
 })(KnownVectorSearchVectorizerKind || (KnownVectorSearchVectorizerKind = {}));
 /** Known values of {@link VectorSearchCompressionKind} that the service accepts. */
 export var KnownVectorSearchCompressionKind;
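The new kinds slot into an index's `vectorSearch.vectorizers` list. A sketch of the `aiServicesVision` kind (the `aml` kind appears in the example after the AIStudioModelCatalogName hunk below) — the surrounding property names and parameter shape are assumptions inferred from this diff:

```ts
// Sketch: a query-time vectorizer backed by the AI Services Vision
// Vectorize API; the parameters object is a placeholder.
const vectorSearch = {
  vectorizers: [
    {
      vectorizerName: "vision-vectorizer",
      kind: "aiServicesVision",
      // endpoint, model version, and auth for the Vision resource go here
    },
  ],
};
```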
@@ -337,6 +397,92 @@ export var KnownVectorSearchCompressionKind;
     /** Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */
     KnownVectorSearchCompressionKind["BinaryQuantization"] = "binaryQuantization";
 })(KnownVectorSearchCompressionKind || (KnownVectorSearchCompressionKind = {}));
+/** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */
+export var KnownVectorSearchCompressionRescoreStorageMethod;
+(function (KnownVectorSearchCompressionRescoreStorageMethod) {
+    /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */
+    KnownVectorSearchCompressionRescoreStorageMethod["PreserveOriginals"] = "preserveOriginals";
+    /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */
+    KnownVectorSearchCompressionRescoreStorageMethod["DiscardOriginals"] = "discardOriginals";
+})(KnownVectorSearchCompressionRescoreStorageMethod || (KnownVectorSearchCompressionRescoreStorageMethod = {}));
+/** Known values of {@link TokenFilterName} that the service accepts. */
+export var KnownTokenFilterName;
+(function (KnownTokenFilterName) {
+    /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
+    KnownTokenFilterName["ArabicNormalization"] = "arabic_normalization";
+    /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
+    KnownTokenFilterName["Apostrophe"] = "apostrophe";
+    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
+    KnownTokenFilterName["AsciiFolding"] = "asciifolding";
+    /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
+    KnownTokenFilterName["CjkBigram"] = "cjk_bigram";
+    /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
+    KnownTokenFilterName["CjkWidth"] = "cjk_width";
+    /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
+    KnownTokenFilterName["Classic"] = "classic";
+    /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
+    KnownTokenFilterName["CommonGram"] = "common_grams";
+    /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
+    KnownTokenFilterName["EdgeNGram"] = "edgeNGram_v2";
+    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
+    KnownTokenFilterName["Elision"] = "elision";
+    /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
+    KnownTokenFilterName["GermanNormalization"] = "german_normalization";
+    /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
+    KnownTokenFilterName["HindiNormalization"] = "hindi_normalization";
+    /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
+    KnownTokenFilterName["IndicNormalization"] = "indic_normalization";
+    /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
+    KnownTokenFilterName["KeywordRepeat"] = "keyword_repeat";
+    /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
+    KnownTokenFilterName["KStem"] = "kstem";
+    /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
+    KnownTokenFilterName["Length"] = "length";
+    /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
+    KnownTokenFilterName["Limit"] = "limit";
+    /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
+    KnownTokenFilterName["Lowercase"] = "lowercase";
+    /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
+    KnownTokenFilterName["NGram"] = "nGram_v2";
+    /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
+    KnownTokenFilterName["PersianNormalization"] = "persian_normalization";
+    /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
+    KnownTokenFilterName["Phonetic"] = "phonetic";
+    /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
+    KnownTokenFilterName["PorterStem"] = "porter_stem";
+    /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
+    KnownTokenFilterName["Reverse"] = "reverse";
+    /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
+    KnownTokenFilterName["ScandinavianNormalization"] = "scandinavian_normalization";
+    /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
+    KnownTokenFilterName["ScandinavianFoldingNormalization"] = "scandinavian_folding";
+    /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
+    KnownTokenFilterName["Shingle"] = "shingle";
+    /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
+    KnownTokenFilterName["Snowball"] = "snowball";
+    /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
+    KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
+    /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
+    KnownTokenFilterName["Stemmer"] = "stemmer";
+    /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
+    KnownTokenFilterName["Stopwords"] = "stopwords";
+    /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
+    KnownTokenFilterName["Trim"] = "trim";
+    /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
+    KnownTokenFilterName["Truncate"] = "truncate";
+    /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
+    KnownTokenFilterName["Unique"] = "unique";
+    /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
+    KnownTokenFilterName["Uppercase"] = "uppercase";
+    /** Splits words into subwords and performs optional transformations on subword groups. */
+    KnownTokenFilterName["WordDelimiter"] = "word_delimiter";
+})(KnownTokenFilterName || (KnownTokenFilterName = {}));
+/** Known values of {@link CharFilterName} that the service accepts. */
+export var KnownCharFilterName;
+(function (KnownCharFilterName) {
+    /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
+    KnownCharFilterName["HtmlStrip"] = "html_strip";
+})(KnownCharFilterName || (KnownCharFilterName = {}));
 /** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */
 export var KnownVectorSearchAlgorithmMetric;
 (function (KnownVectorSearchAlgorithmMetric) {
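Most of this hunk relocates `KnownTokenFilterName` and `KnownCharFilterName` earlier in the file (their old declarations are removed in the final hunk); the genuinely new enum is the rescore storage method. A sketch of using it inside a compression entry — its placement alongside `kind` is an assumption inferred from the enum name:

```ts
// Sketch: binary quantization that keeps the full-precision vectors so
// results can be rescored and oversampled; use "discardOriginals" to
// trade quality for storage savings instead.
const compressions = [
  {
    compressionName: "bq",
    kind: "binaryQuantization",
    rescoreStorageMethod: "preserveOriginals",
  },
];
```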
@@ -365,6 +511,22 @@ export var KnownAzureOpenAIModelName;
     /** TextEmbedding3Small */
     KnownAzureOpenAIModelName["TextEmbedding3Small"] = "text-embedding-3-small";
 })(KnownAzureOpenAIModelName || (KnownAzureOpenAIModelName = {}));
+/** Known values of {@link AIStudioModelCatalogName} that the service accepts. */
+export var KnownAIStudioModelCatalogName;
+(function (KnownAIStudioModelCatalogName) {
+    /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */
+    KnownAIStudioModelCatalogName["OpenAIClipImageTextEmbeddingsVitBasePatch32"] = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32";
+    /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */
+    KnownAIStudioModelCatalogName["OpenAIClipImageTextEmbeddingsViTLargePatch14336"] = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336";
+    /** FacebookDinoV2ImageEmbeddingsViTBase */
+    KnownAIStudioModelCatalogName["FacebookDinoV2ImageEmbeddingsViTBase"] = "Facebook-DinoV2-Image-Embeddings-ViT-Base";
+    /** FacebookDinoV2ImageEmbeddingsViTGiant */
+    KnownAIStudioModelCatalogName["FacebookDinoV2ImageEmbeddingsViTGiant"] = "Facebook-DinoV2-Image-Embeddings-ViT-Giant";
+    /** CohereEmbedV3English */
+    KnownAIStudioModelCatalogName["CohereEmbedV3English"] = "Cohere-embed-v3-english";
+    /** CohereEmbedV3Multilingual */
+    KnownAIStudioModelCatalogName["CohereEmbedV3Multilingual"] = "Cohere-embed-v3-multilingual";
+})(KnownAIStudioModelCatalogName || (KnownAIStudioModelCatalogName = {}));
 /** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */
 export var KnownKeyPhraseExtractionSkillLanguage;
 (function (KnownKeyPhraseExtractionSkillLanguage) {
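These catalog model names pair with the new `aml` vectorizer kind introduced above. A sketch — the parameters object shape (`aMLParameters`, `modelName`, `scoringUri`) is an assumption inferred from the enum names, and the endpoint is a placeholder:

```ts
// Sketch: a query-time vectorizer backed by an Azure ML online endpoint
// serving a model from the AI Studio catalog.
const catalogVectorizer = {
  vectorizerName: "cohere-vectorizer",
  kind: "aml",
  aMLParameters: {
    modelName: "Cohere-embed-v3-multilingual", // a KnownAIStudioModelCatalogName value
    scoringUri: "https://<aml-endpoint>.inference.ml.azure.com/score",
  },
};
```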
@@ -1079,6 +1241,26 @@ export var KnownTextSplitMode;
     /** Split the text into individual sentences. */
     KnownTextSplitMode["Sentences"] = "sentences";
 })(KnownTextSplitMode || (KnownTextSplitMode = {}));
+/** Known values of {@link SplitSkillUnit} that the service accepts. */
+export var KnownSplitSkillUnit;
+(function (KnownSplitSkillUnit) {
+    /** The length will be measured by character. */
+    KnownSplitSkillUnit["Characters"] = "characters";
+    /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */
+    KnownSplitSkillUnit["AzureOpenAITokens"] = "azureOpenAITokens";
+})(KnownSplitSkillUnit || (KnownSplitSkillUnit = {}));
+/** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */
+export var KnownSplitSkillEncoderModelName;
+(function (KnownSplitSkillEncoderModelName) {
+    /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
+    KnownSplitSkillEncoderModelName["R50KBase"] = "r50k_base";
+    /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
+    KnownSplitSkillEncoderModelName["P50KBase"] = "p50k_base";
+    /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
+    KnownSplitSkillEncoderModelName["P50KEdit"] = "p50k_edit";
+    /** A base model with a 100,000 token vocabulary. */
+    KnownSplitSkillEncoderModelName["CL100KBase"] = "cl100k_base";
+})(KnownSplitSkillEncoderModelName || (KnownSplitSkillEncoderModelName = {}));
 /** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */
 export var KnownCustomEntityLookupSkillLanguage;
 (function (KnownCustomEntityLookupSkillLanguage) {
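Together these let the text split skill measure chunk length in tokens rather than characters. A sketch of a split-skill definition — the `unit` and `azureOpenAITokenizerParameters` key names are inferred from the enums above and are assumptions:

```ts
// Sketch: chunk documents into ~512-token pages, counting tokens with
// the cl100k_base encoder from tiktoken.
const splitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill" as const,
  textSplitMode: "pages",
  maximumPageLength: 512,
  unit: "azureOpenAITokens",
  azureOpenAITokenizerParameters: { encoderModelName: "cl100k_base" },
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};
```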
@@ -1249,6 +1431,28 @@ export var KnownTextTranslationSkillLanguage;
     /** Punjabi */
     KnownTextTranslationSkillLanguage["Pa"] = "pa";
 })(KnownTextTranslationSkillLanguage || (KnownTextTranslationSkillLanguage = {}));
+/** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */
+export var KnownDocumentIntelligenceLayoutSkillOutputMode;
+(function (KnownDocumentIntelligenceLayoutSkillOutputMode) {
+    /** Specify the deepest markdown header section to parse. */
+    KnownDocumentIntelligenceLayoutSkillOutputMode["OneToMany"] = "oneToMany";
+})(KnownDocumentIntelligenceLayoutSkillOutputMode || (KnownDocumentIntelligenceLayoutSkillOutputMode = {}));
+/** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */
+export var KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth;
+(function (KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth) {
+    /** Header level 1. */
+    KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth["H1"] = "h1";
+    /** Header level 2. */
+    KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth["H2"] = "h2";
+    /** Header level 3. */
+    KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth["H3"] = "h3";
+    /** Header level 4. */
+    KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth["H4"] = "h4";
+    /** Header level 5. */
+    KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth["H5"] = "h5";
+    /** Header level 6. */
+    KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth["H6"] = "h6";
+})(KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth || (KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth = {}));
 /** Known values of {@link LexicalTokenizerName} that the service accepts. */
 export var KnownLexicalTokenizerName;
 (function (KnownLexicalTokenizerName) {
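These enums parameterize the new Document Intelligence layout skill. A sketch of a skill definition — the OData type string and property names are assumptions inferred from the enum names in this diff:

```ts
// Sketch: extract layout-aware markdown from source files, splitting one
// file into many outputs grouped under headers h1 through h3.
const layoutSkill = {
  odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" as const,
  outputMode: "oneToMany",
  markdownHeaderDepth: "h3",
  inputs: [{ name: "file_data", source: "/document/file_data" }],
  outputs: [{ name: "markdown_document", targetName: "markdown_document" }],
};
```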
@@ -1279,84 +1483,6 @@ export var KnownLexicalTokenizerName;
     /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
     KnownLexicalTokenizerName["Whitespace"] = "whitespace";
 })(KnownLexicalTokenizerName || (KnownLexicalTokenizerName = {}));
-/** Known values of {@link TokenFilterName} that the service accepts. */
-export var KnownTokenFilterName;
-(function (KnownTokenFilterName) {
-    /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
-    KnownTokenFilterName["ArabicNormalization"] = "arabic_normalization";
-    /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
-    KnownTokenFilterName["Apostrophe"] = "apostrophe";
-    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
-    KnownTokenFilterName["AsciiFolding"] = "asciifolding";
-    /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
-    KnownTokenFilterName["CjkBigram"] = "cjk_bigram";
-    /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
-    KnownTokenFilterName["CjkWidth"] = "cjk_width";
-    /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
-    KnownTokenFilterName["Classic"] = "classic";
-    /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
-    KnownTokenFilterName["CommonGram"] = "common_grams";
-    /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
-    KnownTokenFilterName["EdgeNGram"] = "edgeNGram_v2";
-    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
-    KnownTokenFilterName["Elision"] = "elision";
-    /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
-    KnownTokenFilterName["GermanNormalization"] = "german_normalization";
-    /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
-    KnownTokenFilterName["HindiNormalization"] = "hindi_normalization";
-    /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
-    KnownTokenFilterName["IndicNormalization"] = "indic_normalization";
-    /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
-    KnownTokenFilterName["KeywordRepeat"] = "keyword_repeat";
-    /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
-    KnownTokenFilterName["KStem"] = "kstem";
-    /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
-    KnownTokenFilterName["Length"] = "length";
-    /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
-    KnownTokenFilterName["Limit"] = "limit";
-    /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
-    KnownTokenFilterName["Lowercase"] = "lowercase";
-    /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
-    KnownTokenFilterName["NGram"] = "nGram_v2";
-    /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
-    KnownTokenFilterName["PersianNormalization"] = "persian_normalization";
-    /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
-    KnownTokenFilterName["Phonetic"] = "phonetic";
-    /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
-    KnownTokenFilterName["PorterStem"] = "porter_stem";
-    /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
-    KnownTokenFilterName["Reverse"] = "reverse";
-    /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
-    KnownTokenFilterName["ScandinavianNormalization"] = "scandinavian_normalization";
-    /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
-    KnownTokenFilterName["ScandinavianFoldingNormalization"] = "scandinavian_folding";
-    /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
-    KnownTokenFilterName["Shingle"] = "shingle";
-    /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
-    KnownTokenFilterName["Snowball"] = "snowball";
-    /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
-    KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
-    /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
-    KnownTokenFilterName["Stemmer"] = "stemmer";
-    /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
-    KnownTokenFilterName["Stopwords"] = "stopwords";
-    /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
-    KnownTokenFilterName["Trim"] = "trim";
-    /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
-    KnownTokenFilterName["Truncate"] = "truncate";
-    /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
-    KnownTokenFilterName["Unique"] = "unique";
-    /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
-    KnownTokenFilterName["Uppercase"] = "uppercase";
-    /** Splits words into subwords and performs optional transformations on subword groups. */
-    KnownTokenFilterName["WordDelimiter"] = "word_delimiter";
-})(KnownTokenFilterName || (KnownTokenFilterName = {}));
-/** Known values of {@link CharFilterName} that the service accepts. */
-export var KnownCharFilterName;
-(function (KnownCharFilterName) {
-    /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
-    KnownCharFilterName["HtmlStrip"] = "html_strip";
-})(KnownCharFilterName || (KnownCharFilterName = {}));
 /** Known values of {@link RegexFlags} that the service accepts. */
 export var KnownRegexFlags;
 (function (KnownRegexFlags) {