@azure/search-documents 12.0.0-beta.4 → 12.1.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/README.md +52 -32
  2. package/dist/index.js +13493 -13180
  3. package/dist/index.js.map +1 -1
  4. package/dist-esm/src/constants.js +2 -1
  5. package/dist-esm/src/constants.js.map +1 -1
  6. package/dist-esm/src/errorModels.js +4 -0
  7. package/dist-esm/src/errorModels.js.map +1 -0
  8. package/dist-esm/src/generated/data/models/index.js +37 -53
  9. package/dist-esm/src/generated/data/models/index.js.map +1 -1
  10. package/dist-esm/src/generated/data/models/mappers.js +398 -331
  11. package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
  12. package/dist-esm/src/generated/data/models/parameters.js +195 -195
  13. package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
  14. package/dist-esm/src/generated/data/operations/documents.js +41 -41
  15. package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
  16. package/dist-esm/src/generated/data/operationsInterfaces/documents.js.map +1 -1
  17. package/dist-esm/src/generated/data/searchClient.js +30 -4
  18. package/dist-esm/src/generated/data/searchClient.js.map +1 -1
  19. package/dist-esm/src/generated/service/models/index.js +138 -69
  20. package/dist-esm/src/generated/service/models/index.js.map +1 -1
  21. package/dist-esm/src/generated/service/models/mappers.js +1821 -1663
  22. package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
  23. package/dist-esm/src/generated/service/models/parameters.js +64 -64
  24. package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
  25. package/dist-esm/src/generated/service/operations/aliases.js +22 -22
  26. package/dist-esm/src/generated/service/operations/aliases.js.map +1 -1
  27. package/dist-esm/src/generated/service/operations/dataSources.js +23 -23
  28. package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
  29. package/dist-esm/src/generated/service/operations/indexers.js +36 -36
  30. package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
  31. package/dist-esm/src/generated/service/operations/indexes.js +30 -30
  32. package/dist-esm/src/generated/service/operations/indexes.js.map +1 -1
  33. package/dist-esm/src/generated/service/operations/skillsets.js +26 -26
  34. package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
  35. package/dist-esm/src/generated/service/operations/synonymMaps.js +22 -22
  36. package/dist-esm/src/generated/service/operations/synonymMaps.js.map +1 -1
  37. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +1 -1
  38. package/dist-esm/src/generated/service/operationsInterfaces/dataSources.js.map +1 -1
  39. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
  40. package/dist-esm/src/generated/service/operationsInterfaces/indexes.js.map +1 -1
  41. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  42. package/dist-esm/src/generated/service/operationsInterfaces/synonymMaps.js.map +1 -1
  43. package/dist-esm/src/generated/service/searchServiceClient.js +35 -9
  44. package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
  45. package/dist-esm/src/generatedStringLiteralUnions.js +4 -0
  46. package/dist-esm/src/generatedStringLiteralUnions.js.map +1 -0
  47. package/dist-esm/src/index.js +8 -9
  48. package/dist-esm/src/index.js.map +1 -1
  49. package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
  50. package/dist-esm/src/indexModels.js.map +1 -1
  51. package/dist-esm/src/odata.js.map +1 -1
  52. package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
  53. package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
  54. package/dist-esm/src/searchClient.js +36 -27
  55. package/dist-esm/src/searchClient.js.map +1 -1
  56. package/dist-esm/src/searchIndexClient.js +15 -29
  57. package/dist-esm/src/searchIndexClient.js.map +1 -1
  58. package/dist-esm/src/searchIndexerClient.js +9 -6
  59. package/dist-esm/src/searchIndexerClient.js.map +1 -1
  60. package/dist-esm/src/searchIndexingBufferedSender.js +3 -8
  61. package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
  62. package/dist-esm/src/serialization.js.map +1 -1
  63. package/dist-esm/src/serviceModels.js.map +1 -1
  64. package/dist-esm/src/serviceUtils.js +44 -67
  65. package/dist-esm/src/serviceUtils.js.map +1 -1
  66. package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
  67. package/dist-esm/src/synonymMapHelper.js +1 -1
  68. package/dist-esm/src/synonymMapHelper.js.map +1 -1
  69. package/dist-esm/src/tracing.js +1 -1
  70. package/dist-esm/src/tracing.js.map +1 -1
  71. package/package.json +42 -43
  72. package/types/search-documents.d.ts +1014 -1458
package/dist-esm/src/generated/service/models/index.js
@@ -5,12 +5,12 @@
  * Code generated by Microsoft (R) AutoRest Code Generator.
  * Changes may cause incorrect behavior and will be lost if the code is regenerated.
  */
- /** Known values of {@link ApiVersion20231001Preview} that the service accepts. */
- export var KnownApiVersion20231001Preview;
- (function (KnownApiVersion20231001Preview) {
- /** Api Version '2023-10-01-Preview' */
- KnownApiVersion20231001Preview["TwoThousandTwentyThree1001Preview"] = "2023-10-01-Preview";
- })(KnownApiVersion20231001Preview || (KnownApiVersion20231001Preview = {}));
+ /** Known values of {@link ApiVersion20240301Preview} that the service accepts. */
+ export var KnownApiVersion20240301Preview;
+ (function (KnownApiVersion20240301Preview) {
+ /** Api Version '2024-03-01-Preview' */
+ KnownApiVersion20240301Preview["TwoThousandTwentyFour0301Preview"] = "2024-03-01-Preview";
+ })(KnownApiVersion20240301Preview || (KnownApiVersion20240301Preview = {}));
  /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
  export var KnownSearchIndexerDataSourceType;
  (function (KnownSearchIndexerDataSourceType) {
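The only functional change in this hunk is the preview API version bump from 2023-10-01-Preview to 2024-03-01-Preview. As a usage sketch, assuming the `serviceVersion` client option is unchanged in this release (the endpoint, index name, key, and document shape below are placeholders):

```ts
import { SearchClient, AzureKeyCredential } from "@azure/search-documents";

// Placeholder endpoint, index, key, and document shape; `serviceVersion` pins
// the preview API surfaced above as KnownApiVersion20240301Preview.
const client = new SearchClient<{ id: string }>(
  "https://<service-name>.search.windows.net",
  "<index-name>",
  new AzureKeyCredential("<api-key>"),
  { serviceVersion: "2024-03-01-Preview" }
);
```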
@@ -38,9 +38,9 @@ export var KnownBlobIndexerParsingMode;
  KnownBlobIndexerParsingMode["DelimitedText"] = "delimitedText";
  /** Set to json to extract structured content from JSON files. */
  KnownBlobIndexerParsingMode["Json"] = "json";
- /** Set to jsonArray to extract individual elements of a JSON array as separate documents in Azure Cognitive Search. */
+ /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */
  KnownBlobIndexerParsingMode["JsonArray"] = "jsonArray";
- /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents in Azure Cognitive Search. */
+ /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */
  KnownBlobIndexerParsingMode["JsonLines"] = "jsonLines";
  })(KnownBlobIndexerParsingMode || (KnownBlobIndexerParsingMode = {}));
  /** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */
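The jsonArray and jsonLines comments above only drop the old "Azure Cognitive Search" branding. For orientation, these parsing modes are applied through an indexer's `parameters.configuration`; a minimal sketch with placeholder resource names:

```ts
import { SearchIndexerClient, AzureKeyCredential } from "@azure/search-documents";

// Placeholder service, data source, and index names.
const indexerClient = new SearchIndexerClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<api-key>")
);

async function createJsonLinesIndexer(): Promise<void> {
  // Each newline-delimited JSON entity in a blob becomes its own document.
  await indexerClient.createIndexer({
    name: "blob-indexer",
    dataSourceName: "blob-datasource",
    targetIndexName: "target-index",
    parameters: {
      configuration: { parsingMode: "jsonLines" },
    },
  });
}
```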
@@ -74,7 +74,7 @@ export var KnownBlobIndexerPDFTextRotationAlgorithm;
  /** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */
  export var KnownIndexerExecutionEnvironment;
  (function (KnownIndexerExecutionEnvironment) {
- /** Indicates that Azure Cognitive Search can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
+ /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
  KnownIndexerExecutionEnvironment["Standard"] = "standard";
  /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
  KnownIndexerExecutionEnvironment["Private"] = "private";
@@ -114,7 +114,7 @@ export var KnownSearchFieldDataType;
  KnownSearchFieldDataType["Double"] = "Edm.Double";
  /** Indicates that a field contains a Boolean value (true or false). */
  KnownSearchFieldDataType["Boolean"] = "Edm.Boolean";
- /** Indicates that a field contains a date/time value, including timezone information. */
+ /** Indicates that a field contains a date\/time value, including timezone information. */
  KnownSearchFieldDataType["DateTimeOffset"] = "Edm.DateTimeOffset";
  /** Indicates that a field contains a geo-location in terms of longitude and latitude. */
  KnownSearchFieldDataType["GeographyPoint"] = "Edm.GeographyPoint";
@@ -122,6 +122,12 @@ export var KnownSearchFieldDataType;
  KnownSearchFieldDataType["Complex"] = "Edm.ComplexType";
  /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). */
  KnownSearchFieldDataType["Single"] = "Edm.Single";
+ /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */
+ KnownSearchFieldDataType["Half"] = "Edm.Half";
+ /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */
+ KnownSearchFieldDataType["Int16"] = "Edm.Int16";
+ /** Indicates that a field contains a 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */
+ KnownSearchFieldDataType["SByte"] = "Edm.SByte";
  })(KnownSearchFieldDataType || (KnownSearchFieldDataType = {}));
  /** Known values of {@link LexicalAnalyzerName} that the service accepts. */
  export var KnownLexicalAnalyzerName;
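This hunk introduces the narrow numeric element types Edm.Half, Edm.Int16, and Edm.SByte, each valid only inside a collection. A hedged sketch of a vector field using the new half-precision type; the dimension count and profile name are hypothetical, and the property names follow the 12.x typings:

```ts
import type { SearchField } from "@azure/search-documents";

// Half-precision vector field; per the comment above, Edm.Half is only valid
// as a collection element type. Dimensions and profile name are hypothetical.
const embeddingField: SearchField = {
  name: "embedding",
  type: "Collection(Edm.Half)",
  searchable: true,
  vectorSearchDimensions: 1536,
  vectorSearchProfileName: "vector-profile",
};
```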
@@ -300,37 +306,37 @@ export var KnownLexicalAnalyzerName;
  KnownLexicalAnalyzerName["ViMicrosoft"] = "vi.microsoft";
  /** Standard Lucene analyzer. */
  KnownLexicalAnalyzerName["StandardLucene"] = "standard.lucene";
- /** Standard ASCII Folding Lucene analyzer. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers */
+ /** Standard ASCII Folding Lucene analyzer. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
  KnownLexicalAnalyzerName["StandardAsciiFoldingLucene"] = "standardasciifolding.lucene";
- /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html */
+ /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
  KnownLexicalAnalyzerName["Keyword"] = "keyword";
- /** Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html */
+ /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
  KnownLexicalAnalyzerName["Pattern"] = "pattern";
- /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html */
+ /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
  KnownLexicalAnalyzerName["Simple"] = "simple";
- /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html */
+ /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
  KnownLexicalAnalyzerName["Stop"] = "stop";
- /** An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html */
+ /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
  KnownLexicalAnalyzerName["Whitespace"] = "whitespace";
  })(KnownLexicalAnalyzerName || (KnownLexicalAnalyzerName = {}));
  /** Known values of {@link LexicalNormalizerName} that the service accepts. */
  export var KnownLexicalNormalizerName;
  (function (KnownLexicalNormalizerName) {
- /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
+ /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
  KnownLexicalNormalizerName["AsciiFolding"] = "asciifolding";
- /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
+ /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
  KnownLexicalNormalizerName["Elision"] = "elision";
- /** Normalizes token text to lowercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
+ /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
  KnownLexicalNormalizerName["Lowercase"] = "lowercase";
- /** Standard normalizer, which consists of lowercase and asciifolding. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
+ /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
  KnownLexicalNormalizerName["Standard"] = "standard";
- /** Normalizes token text to uppercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
+ /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
  KnownLexicalNormalizerName["Uppercase"] = "uppercase";
  })(KnownLexicalNormalizerName || (KnownLexicalNormalizerName = {}));
  /** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */
  export var KnownVectorSearchAlgorithmKind;
  (function (KnownVectorSearchAlgorithmKind) {
- /** Hnsw (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */
+ /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */
  KnownVectorSearchAlgorithmKind["Hnsw"] = "hnsw";
  /** Exhaustive KNN algorithm which will perform brute-force search. */
  KnownVectorSearchAlgorithmKind["ExhaustiveKnn"] = "exhaustiveKnn";
@@ -338,79 +344,85 @@ export var KnownVectorSearchAlgorithmKind;
  /** Known values of {@link VectorSearchVectorizerKind} that the service accepts. */
  export var KnownVectorSearchVectorizerKind;
  (function (KnownVectorSearchVectorizerKind) {
- /** Generate embeddings using an Azure Open AI service at query time. */
+ /** Generate embeddings using an Azure OpenAI resource at query time. */
  KnownVectorSearchVectorizerKind["AzureOpenAI"] = "azureOpenAI";
  /** Generate embeddings using a custom web endpoint at query time. */
  KnownVectorSearchVectorizerKind["CustomWebApi"] = "customWebApi";
  })(KnownVectorSearchVectorizerKind || (KnownVectorSearchVectorizerKind = {}));
+ /** Known values of {@link VectorSearchCompressionKind} that the service accepts. */
+ export var KnownVectorSearchCompressionKind;
+ (function (KnownVectorSearchCompressionKind) {
+ /** Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */
+ KnownVectorSearchCompressionKind["ScalarQuantization"] = "scalarQuantization";
+ })(KnownVectorSearchCompressionKind || (KnownVectorSearchCompressionKind = {}));
  /** Known values of {@link TokenFilterName} that the service accepts. */
  export var KnownTokenFilterName;
  (function (KnownTokenFilterName) {
- /** A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html */
+ /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
  KnownTokenFilterName["ArabicNormalization"] = "arabic_normalization";
- /** Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html */
+ /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
  KnownTokenFilterName["Apostrophe"] = "apostrophe";
- /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
+ /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
  KnownTokenFilterName["AsciiFolding"] = "asciifolding";
- /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html */
+ /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
  KnownTokenFilterName["CjkBigram"] = "cjk_bigram";
- /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html */
+ /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
  KnownTokenFilterName["CjkWidth"] = "cjk_width";
- /** Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html */
+ /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
  KnownTokenFilterName["Classic"] = "classic";
- /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html */
+ /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
  KnownTokenFilterName["CommonGram"] = "common_grams";
- /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html */
+ /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
  KnownTokenFilterName["EdgeNGram"] = "edgeNGram_v2";
- /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
+ /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
  KnownTokenFilterName["Elision"] = "elision";
- /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html */
+ /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
  KnownTokenFilterName["GermanNormalization"] = "german_normalization";
- /** Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html */
+ /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
  KnownTokenFilterName["HindiNormalization"] = "hindi_normalization";
- /** Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html */
+ /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
  KnownTokenFilterName["IndicNormalization"] = "indic_normalization";
- /** Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html */
+ /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
  KnownTokenFilterName["KeywordRepeat"] = "keyword_repeat";
- /** A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html */
+ /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
  KnownTokenFilterName["KStem"] = "kstem";
- /** Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html */
+ /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
  KnownTokenFilterName["Length"] = "length";
- /** Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html */
+ /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
  KnownTokenFilterName["Limit"] = "limit";
- /** Normalizes token text to lower case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
+ /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
  KnownTokenFilterName["Lowercase"] = "lowercase";
- /** Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html */
+ /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
  KnownTokenFilterName["NGram"] = "nGram_v2";
- /** Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html */
+ /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
  KnownTokenFilterName["PersianNormalization"] = "persian_normalization";
- /** Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html */
+ /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
  KnownTokenFilterName["Phonetic"] = "phonetic";
- /** Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer */
+ /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
  KnownTokenFilterName["PorterStem"] = "porter_stem";
- /** Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
+ /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
  KnownTokenFilterName["Reverse"] = "reverse";
- /** Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html */
+ /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
  KnownTokenFilterName["ScandinavianNormalization"] = "scandinavian_normalization";
- /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html */
+ /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
  KnownTokenFilterName["ScandinavianFoldingNormalization"] = "scandinavian_folding";
- /** Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html */
+ /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
  KnownTokenFilterName["Shingle"] = "shingle";
- /** A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html */
+ /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
  KnownTokenFilterName["Snowball"] = "snowball";
- /** Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html */
+ /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
  KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
- /** Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters */
+ /** Language specific stemming filter. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
  KnownTokenFilterName["Stemmer"] = "stemmer";
- /** Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html */
+ /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
  KnownTokenFilterName["Stopwords"] = "stopwords";
- /** Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html */
+ /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
  KnownTokenFilterName["Trim"] = "trim";
- /** Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html */
+ /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
  KnownTokenFilterName["Truncate"] = "truncate";
- /** Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html */
+ /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
  KnownTokenFilterName["Unique"] = "unique";
- /** Normalizes token text to upper case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
+ /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
  KnownTokenFilterName["Uppercase"] = "uppercase";
  /** Splits words into subwords and performs optional transformations on subword groups. */
  KnownTokenFilterName["WordDelimiter"] = "word_delimiter";
@@ -418,16 +430,25 @@ export var KnownTokenFilterName;
  /** Known values of {@link CharFilterName} that the service accepts. */
  export var KnownCharFilterName;
  (function (KnownCharFilterName) {
- /** A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html */
+ /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
  KnownCharFilterName["HtmlStrip"] = "html_strip";
  })(KnownCharFilterName || (KnownCharFilterName = {}));
  /** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */
  export var KnownVectorSearchAlgorithmMetric;
  (function (KnownVectorSearchAlgorithmMetric) {
+ /** Cosine */
  KnownVectorSearchAlgorithmMetric["Cosine"] = "cosine";
+ /** Euclidean */
  KnownVectorSearchAlgorithmMetric["Euclidean"] = "euclidean";
+ /** DotProduct */
  KnownVectorSearchAlgorithmMetric["DotProduct"] = "dotProduct";
  })(KnownVectorSearchAlgorithmMetric || (KnownVectorSearchAlgorithmMetric = {}));
+ /** Known values of {@link VectorSearchCompressionTargetDataType} that the service accepts. */
+ export var KnownVectorSearchCompressionTargetDataType;
+ (function (KnownVectorSearchCompressionTargetDataType) {
+ /** Int8 */
+ KnownVectorSearchCompressionTargetDataType["Int8"] = "int8";
+ })(KnownVectorSearchCompressionTargetDataType || (KnownVectorSearchCompressionTargetDataType = {}));
  /** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */
  export var KnownKeyPhraseExtractionSkillLanguage;
  (function (KnownKeyPhraseExtractionSkillLanguage) {
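Taken together, the new VectorSearchCompressionKind and VectorSearchCompressionTargetDataType enums point at a scalar quantization compression setting for vector fields. The sketch below is inferred from the enum values alone; the surrounding property names (`rerankWithOriginalVectors`, `defaultOversampling`, `parameters.quantizedDataType`) are assumptions from the preview REST surface, not confirmed typings:

```ts
// A scalar-quantization compression entry for a vector search configuration,
// targeting int8 storage. Only the "scalarQuantization" and "int8" values
// appear in this diff; the other property names are assumptions.
const compression = {
  name: "sq-compression",
  kind: "scalarQuantization",
  rerankWithOriginalVectors: true, // assumed option
  defaultOversampling: 4,          // assumed option
  parameters: { quantizedDataType: "int8" },
};
```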
@@ -1067,6 +1088,12 @@ export var KnownPIIDetectionSkillMaskingMode;
  /** Known values of {@link SplitSkillLanguage} that the service accepts. */
  export var KnownSplitSkillLanguage;
  (function (KnownSplitSkillLanguage) {
+ /** Amharic */
+ KnownSplitSkillLanguage["Am"] = "am";
+ /** Bosnian */
+ KnownSplitSkillLanguage["Bs"] = "bs";
+ /** Czech */
+ KnownSplitSkillLanguage["Cs"] = "cs";
  /** Danish */
  KnownSplitSkillLanguage["Da"] = "da";
  /** German */
@@ -1075,16 +1102,58 @@ export var KnownSplitSkillLanguage;
  KnownSplitSkillLanguage["En"] = "en";
  /** Spanish */
  KnownSplitSkillLanguage["Es"] = "es";
+ /** Estonian */
+ KnownSplitSkillLanguage["Et"] = "et";
  /** Finnish */
  KnownSplitSkillLanguage["Fi"] = "fi";
  /** French */
  KnownSplitSkillLanguage["Fr"] = "fr";
+ /** Hebrew */
+ KnownSplitSkillLanguage["He"] = "he";
+ /** Hindi */
+ KnownSplitSkillLanguage["Hi"] = "hi";
+ /** Croatian */
+ KnownSplitSkillLanguage["Hr"] = "hr";
+ /** Hungarian */
+ KnownSplitSkillLanguage["Hu"] = "hu";
+ /** Indonesian */
+ KnownSplitSkillLanguage["Id"] = "id";
+ /** Icelandic */
+ KnownSplitSkillLanguage["Is"] = "is";
  /** Italian */
  KnownSplitSkillLanguage["It"] = "it";
+ /** Japanese */
+ KnownSplitSkillLanguage["Ja"] = "ja";
  /** Korean */
  KnownSplitSkillLanguage["Ko"] = "ko";
- /** Portuguese */
+ /** Latvian */
+ KnownSplitSkillLanguage["Lv"] = "lv";
+ /** Norwegian */
+ KnownSplitSkillLanguage["Nb"] = "nb";
+ /** Dutch */
+ KnownSplitSkillLanguage["Nl"] = "nl";
+ /** Polish */
+ KnownSplitSkillLanguage["Pl"] = "pl";
+ /** Portuguese (Portugal) */
  KnownSplitSkillLanguage["Pt"] = "pt";
+ /** Portuguese (Brazil) */
+ KnownSplitSkillLanguage["PtBr"] = "pt-br";
+ /** Russian */
+ KnownSplitSkillLanguage["Ru"] = "ru";
+ /** Slovak */
+ KnownSplitSkillLanguage["Sk"] = "sk";
+ /** Slovenian */
+ KnownSplitSkillLanguage["Sl"] = "sl";
+ /** Serbian */
+ KnownSplitSkillLanguage["Sr"] = "sr";
+ /** Swedish */
+ KnownSplitSkillLanguage["Sv"] = "sv";
+ /** Turkish */
+ KnownSplitSkillLanguage["Tr"] = "tr";
+ /** Urdu */
+ KnownSplitSkillLanguage["Ur"] = "ur";
+ /** Chinese (Simplified) */
+ KnownSplitSkillLanguage["Zh"] = "zh";
  })(KnownSplitSkillLanguage || (KnownSplitSkillLanguage = {}));
  /** Known values of {@link TextSplitMode} that the service accepts. */
  export var KnownTextSplitMode;
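The split-skill language list grows substantially here. A sketch of a SplitSkill definition using one of the newly added codes; the inputs, outputs, and page length are illustrative:

```ts
import type { SplitSkill } from "@azure/search-documents";

// Splits document content into pages using a newly added language code; the
// inputs, outputs, and page length are illustrative.
const splitSkill: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  textSplitMode: "pages",
  maximumPageLength: 2000,
  defaultLanguageCode: "pt-br",
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};
```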
@@ -1267,31 +1336,31 @@ export var KnownTextTranslationSkillLanguage;
  /** Known values of {@link LexicalTokenizerName} that the service accepts. */
  export var KnownLexicalTokenizerName;
  (function (KnownLexicalTokenizerName) {
- /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html */
+ /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
  KnownLexicalTokenizerName["Classic"] = "classic";
- /** Tokenizes the input from an edge into n-grams of the given size(s). See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html */
+ /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
  KnownLexicalTokenizerName["EdgeNGram"] = "edgeNGram";
- /** Emits the entire input as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html */
+ /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
  KnownLexicalTokenizerName["Keyword"] = "keyword_v2";
- /** Divides text at non-letters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html */
+ /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
  KnownLexicalTokenizerName["Letter"] = "letter";
- /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html */
+ /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
  KnownLexicalTokenizerName["Lowercase"] = "lowercase";
  /** Divides text using language-specific rules. */
  KnownLexicalTokenizerName["MicrosoftLanguageTokenizer"] = "microsoft_language_tokenizer";
  /** Divides text using language-specific rules and reduces words to their base forms. */
  KnownLexicalTokenizerName["MicrosoftLanguageStemmingTokenizer"] = "microsoft_language_stemming_tokenizer";
- /** Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html */
+ /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
  KnownLexicalTokenizerName["NGram"] = "nGram";
- /** Tokenizer for path-like hierarchies. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html */
+ /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
  KnownLexicalTokenizerName["PathHierarchy"] = "path_hierarchy_v2";
- /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html */
+ /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
  KnownLexicalTokenizerName["Pattern"] = "pattern";
- /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html */
+ /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
  KnownLexicalTokenizerName["Standard"] = "standard_v2";
- /** Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html */
+ /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
  KnownLexicalTokenizerName["UaxUrlEmail"] = "uax_url_email";
- /** Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html */
+ /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
  KnownLexicalTokenizerName["Whitespace"] = "whitespace";
  })(KnownLexicalTokenizerName || (KnownLexicalTokenizerName = {}));
  /** Known values of {@link RegexFlags} that the service accepts. */
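The tokenizer changes above are escaping-only, but for orientation, a sketch of how one of these tokenizer names plugs into a custom analyzer (the analyzer name and filter choices are illustrative):

```ts
import type { CustomAnalyzer } from "@azure/search-documents";

// Pairs the uax_url_email tokenizer with two token filters from the earlier
// hunk; analyzer name and filter choices are illustrative.
const urlAnalyzer: CustomAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
  name: "url-email-analyzer",
  tokenizerName: "uax_url_email",
  tokenFilters: ["lowercase", "asciifolding"],
};
```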