@azure/search-documents 12.1.0 → 12.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. package/README.md +1 -1
  2. package/dist/index.js +2298 -426
  3. package/dist/index.js.map +1 -1
  4. package/dist-esm/src/base64.browser.js +1 -1
  5. package/dist-esm/src/base64.browser.js.map +1 -1
  6. package/dist-esm/src/base64.js +1 -1
  7. package/dist-esm/src/base64.js.map +1 -1
  8. package/dist-esm/src/errorModels.js +1 -1
  9. package/dist-esm/src/errorModels.js.map +1 -1
  10. package/dist-esm/src/generated/data/models/index.js +208 -6
  11. package/dist-esm/src/generated/data/models/index.js.map +1 -1
  12. package/dist-esm/src/generated/data/models/mappers.js +378 -0
  13. package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
  14. package/dist-esm/src/generated/data/models/parameters.js +42 -0
  15. package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
  16. package/dist-esm/src/generated/data/operations/documents.js +4 -0
  17. package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
  18. package/dist-esm/src/generated/data/searchClient.js +1 -1
  19. package/dist-esm/src/generated/data/searchClient.js.map +1 -1
  20. package/dist-esm/src/generated/service/models/index.js +154 -84
  21. package/dist-esm/src/generated/service/models/index.js.map +1 -1
  22. package/dist-esm/src/generated/service/models/mappers.js +684 -70
  23. package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
  24. package/dist-esm/src/generated/service/models/parameters.js +51 -1
  25. package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
  26. package/dist-esm/src/generated/service/operations/aliases.js +160 -0
  27. package/dist-esm/src/generated/service/operations/aliases.js.map +1 -0
  28. package/dist-esm/src/generated/service/operations/dataSources.js +4 -1
  29. package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
  30. package/dist-esm/src/generated/service/operations/index.js +1 -0
  31. package/dist-esm/src/generated/service/operations/index.js.map +1 -1
  32. package/dist-esm/src/generated/service/operations/indexers.js +29 -1
  33. package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
  34. package/dist-esm/src/generated/service/operations/skillsets.js +30 -1
  35. package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
  36. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js +9 -0
  37. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +1 -0
  38. package/dist-esm/src/generated/service/operationsInterfaces/index.js +1 -0
  39. package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +1 -1
  40. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
  41. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  42. package/dist-esm/src/generated/service/searchServiceClient.js +3 -2
  43. package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
  44. package/dist-esm/src/geographyPoint.js +1 -1
  45. package/dist-esm/src/geographyPoint.js.map +1 -1
  46. package/dist-esm/src/index.js +4 -4
  47. package/dist-esm/src/index.js.map +1 -1
  48. package/dist-esm/src/indexDocumentsBatch.js +1 -1
  49. package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
  50. package/dist-esm/src/indexModels.js +1 -1
  51. package/dist-esm/src/indexModels.js.map +1 -1
  52. package/dist-esm/src/logger.js +1 -1
  53. package/dist-esm/src/logger.js.map +1 -1
  54. package/dist-esm/src/odata.js +1 -1
  55. package/dist-esm/src/odata.js.map +1 -1
  56. package/dist-esm/src/odataMetadataPolicy.js +1 -1
  57. package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
  58. package/dist-esm/src/searchApiKeyCredentialPolicy.js +1 -1
  59. package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
  60. package/dist-esm/src/searchAudience.js +1 -1
  61. package/dist-esm/src/searchAudience.js.map +1 -1
  62. package/dist-esm/src/searchClient.js +11 -4
  63. package/dist-esm/src/searchClient.js.map +1 -1
  64. package/dist-esm/src/searchIndexClient.js +153 -4
  65. package/dist-esm/src/searchIndexClient.js.map +1 -1
  66. package/dist-esm/src/searchIndexerClient.js +48 -1
  67. package/dist-esm/src/searchIndexerClient.js.map +1 -1
  68. package/dist-esm/src/searchIndexingBufferedSender.js +1 -1
  69. package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
  70. package/dist-esm/src/serialization.js +1 -1
  71. package/dist-esm/src/serialization.js.map +1 -1
  72. package/dist-esm/src/serviceModels.js +1 -1
  73. package/dist-esm/src/serviceModels.js.map +1 -1
  74. package/dist-esm/src/serviceUtils.js +79 -19
  75. package/dist-esm/src/serviceUtils.js.map +1 -1
  76. package/dist-esm/src/synonymMapHelper.browser.js +1 -1
  77. package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
  78. package/dist-esm/src/synonymMapHelper.js +1 -1
  79. package/dist-esm/src/synonymMapHelper.js.map +1 -1
  80. package/dist-esm/src/tracing.js +1 -1
  81. package/dist-esm/src/tracing.js.map +1 -1
  82. package/dist-esm/src/walk.js +1 -1
  83. package/dist-esm/src/walk.js.map +1 -1
  84. package/package.json +6 -6
  85. package/types/search-documents.d.ts +1515 -98
@@ -5,12 +5,12 @@
  * Code generated by Microsoft (R) AutoRest Code Generator.
  * Changes may cause incorrect behavior and will be lost if the code is regenerated.
  */
- /** Known values of {@link ApiVersion20240701} that the service accepts. */
- export var KnownApiVersion20240701;
- (function (KnownApiVersion20240701) {
- /** Api Version '2024-07-01' */
- KnownApiVersion20240701["TwoThousandTwentyFour0701"] = "2024-07-01";
- })(KnownApiVersion20240701 || (KnownApiVersion20240701 = {}));
+ /** Known values of {@link ApiVersion20240901Preview} that the service accepts. */
+ export var KnownApiVersion20240901Preview;
+ (function (KnownApiVersion20240901Preview) {
+ /** Api Version '2024-09-01-preview' */
+ KnownApiVersion20240901Preview["TwoThousandTwentyFour0901Preview"] = "2024-09-01-preview";
+ })(KnownApiVersion20240901Preview || (KnownApiVersion20240901Preview = {}));
  /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
  export var KnownSearchIndexerDataSourceType;
  (function (KnownSearchIndexerDataSourceType) {
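The enum rename above reflects the move from the 2024-07-01 GA REST API to the 2024-09-01-preview version that this beta targets. A minimal sketch of pinning a client to the preview version, assuming the 12.2.0-beta.1 client options still accept a serviceVersion string as in earlier 12.x releases:

import { AzureKeyCredential, SearchClient } from "@azure/search-documents";

// Hypothetical endpoint, index name, and key; serviceVersion pins the preview REST API.
const client = new SearchClient<{ id: string }>(
  "https://<service-name>.search.windows.net",
  "<index-name>",
  new AzureKeyCredential("<api-key>"),
  { serviceVersion: "2024-09-01-preview" },
);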
@@ -26,6 +26,8 @@ export var KnownSearchIndexerDataSourceType;
  KnownSearchIndexerDataSourceType["MySql"] = "mysql";
  /** Indicates an ADLS Gen2 datasource. */
  KnownSearchIndexerDataSourceType["AdlsGen2"] = "adlsgen2";
+ /** Indicates a Microsoft Fabric OneLake datasource. */
+ KnownSearchIndexerDataSourceType["OneLake"] = "onelake";
  })(KnownSearchIndexerDataSourceType || (KnownSearchIndexerDataSourceType = {}));
  /** Known values of {@link BlobIndexerParsingMode} that the service accepts. */
  export var KnownBlobIndexerParsingMode;
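The new "onelake" value extends SearchIndexerDataSourceType. A sketch of registering such a data source with SearchIndexerClient; the connection string and container values are placeholders, and OneLake-specific connection semantics are not confirmed by this diff:

import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

const indexerClient = new SearchIndexerClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<admin-key>"),
  { serviceVersion: "2024-09-01-preview" },
);

// "onelake" is the newly accepted data source type; the other fields are placeholders.
await indexerClient.createDataSourceConnection({
  name: "fabric-onelake-ds",
  type: "onelake",
  connectionString: "<onelake-connection-string>",
  container: { name: "<lakehouse-folder>" },
});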
@@ -79,6 +81,20 @@ export var KnownIndexerExecutionEnvironment;
  /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
  KnownIndexerExecutionEnvironment["Private"] = "private";
  })(KnownIndexerExecutionEnvironment || (KnownIndexerExecutionEnvironment = {}));
+ /** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
+ export var KnownIndexerExecutionStatusDetail;
+ (function (KnownIndexerExecutionStatusDetail) {
+ /** Indicates that the reset that occurred was for a call to ResetDocs. */
+ KnownIndexerExecutionStatusDetail["ResetDocs"] = "resetDocs";
+ })(KnownIndexerExecutionStatusDetail || (KnownIndexerExecutionStatusDetail = {}));
+ /** Known values of {@link IndexingMode} that the service accepts. */
+ export var KnownIndexingMode;
+ (function (KnownIndexingMode) {
+ /** The indexer is indexing all documents in the datasource. */
+ KnownIndexingMode["IndexingAllDocs"] = "indexingAllDocs";
+ /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
+ KnownIndexingMode["IndexingResetDocs"] = "indexingResetDocs";
+ })(KnownIndexingMode || (KnownIndexingMode = {}));
  /** Known values of {@link IndexProjectionMode} that the service accepts. */
  export var KnownIndexProjectionMode;
  (function (KnownIndexProjectionMode) {
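These status values describe how an indexer run relates to a ResetDocs call. A sketch of reading them from an indexer's latest execution result; the "mode" property name is an assumption taken from the preview REST API rather than this diff, so it is read loosely against the stable typings:

// Reuses the indexerClient from the previous sketch.
const status = await indexerClient.getIndexerStatus("<indexer-name>");
// The preview-only "mode" property is not in the stable typings, so read it loosely.
const lastRun = status.lastResult as any;

// "indexingResetDocs" is one of the known IndexingMode values added above.
if (lastRun?.mode === "indexingResetDocs") {
  console.log("Indexer is re-processing only the documents queued by ResetDocs.");
}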
@@ -307,6 +323,20 @@ export var KnownLexicalAnalyzerName;
  /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
  KnownLexicalAnalyzerName["Whitespace"] = "whitespace";
  })(KnownLexicalAnalyzerName || (KnownLexicalAnalyzerName = {}));
+ /** Known values of {@link LexicalNormalizerName} that the service accepts. */
+ export var KnownLexicalNormalizerName;
+ (function (KnownLexicalNormalizerName) {
+ /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
+ KnownLexicalNormalizerName["AsciiFolding"] = "asciifolding";
+ /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
+ KnownLexicalNormalizerName["Elision"] = "elision";
+ /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
+ KnownLexicalNormalizerName["Lowercase"] = "lowercase";
+ /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
+ KnownLexicalNormalizerName["Standard"] = "standard";
+ /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
+ KnownLexicalNormalizerName["Uppercase"] = "uppercase";
+ })(KnownLexicalNormalizerName || (KnownLexicalNormalizerName = {}));
  /** Known values of {@link VectorEncodingFormat} that the service accepts. */
  export var KnownVectorEncodingFormat;
  (function (KnownVectorEncodingFormat) {
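Normalizers apply lightweight, analyzer-like transforms to filterable, sortable, and facetable string fields. A sketch of attaching the "lowercase" normalizer to a field definition; normalizerName is assumed to be the preview-era field property, so it is cast loosely against the stable typings:

import type { SearchIndex } from "@azure/search-documents";

const index: SearchIndex = {
  name: "products",
  fields: [
    { name: "id", type: "Edm.String", key: true },
    // normalizerName is assumed to be the preview field property for normalizers.
    { name: "category", type: "Edm.String", filterable: true, normalizerName: "lowercase" } as any,
  ],
};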
@@ -328,6 +358,10 @@ export var KnownVectorSearchVectorizerKind;
  KnownVectorSearchVectorizerKind["AzureOpenAI"] = "azureOpenAI";
  /** Generate embeddings using a custom web endpoint at query time. */
  KnownVectorSearchVectorizerKind["CustomWebApi"] = "customWebApi";
+ /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */
+ KnownVectorSearchVectorizerKind["AIServicesVision"] = "aiServicesVision";
+ /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog at query time. */
+ KnownVectorSearchVectorizerKind["AML"] = "aml";
  })(KnownVectorSearchVectorizerKind || (KnownVectorSearchVectorizerKind = {}));
  /** Known values of {@link VectorSearchCompressionKind} that the service accepts. */
  export var KnownVectorSearchCompressionKind;
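The two new vectorizer kinds let queries produce embeddings through Azure AI Vision or an Azure Machine Learning endpoint. A sketch of what an "aiServicesVision" vectorizer entry might look like, written as a plain object because only the kind value comes from this diff; the parameter names mirror the preview REST API and are assumptions:

// Plain object mirroring the preview REST payload; property names other than
// "kind" are assumptions, and the resource URI/key/version are placeholders.
const visionVectorizer = {
  name: "vision-vectorizer",
  kind: "aiServicesVision",
  aiServicesVisionParameters: {
    resourceUri: "https://<ai-services-account>.cognitiveservices.azure.com",
    apiKey: "<ai-services-key>",
    modelVersion: "<model-version>",
  },
};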
@@ -337,6 +371,84 @@ export var KnownVectorSearchCompressionKind;
  /** Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */
  KnownVectorSearchCompressionKind["BinaryQuantization"] = "binaryQuantization";
  })(KnownVectorSearchCompressionKind || (KnownVectorSearchCompressionKind = {}));
+ /** Known values of {@link TokenFilterName} that the service accepts. */
+ export var KnownTokenFilterName;
+ (function (KnownTokenFilterName) {
+ /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
+ KnownTokenFilterName["ArabicNormalization"] = "arabic_normalization";
+ /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
+ KnownTokenFilterName["Apostrophe"] = "apostrophe";
+ /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
+ KnownTokenFilterName["AsciiFolding"] = "asciifolding";
+ /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
+ KnownTokenFilterName["CjkBigram"] = "cjk_bigram";
+ /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
+ KnownTokenFilterName["CjkWidth"] = "cjk_width";
+ /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
+ KnownTokenFilterName["Classic"] = "classic";
+ /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
+ KnownTokenFilterName["CommonGram"] = "common_grams";
+ /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
+ KnownTokenFilterName["EdgeNGram"] = "edgeNGram_v2";
+ /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
+ KnownTokenFilterName["Elision"] = "elision";
+ /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
+ KnownTokenFilterName["GermanNormalization"] = "german_normalization";
+ /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
+ KnownTokenFilterName["HindiNormalization"] = "hindi_normalization";
+ /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
+ KnownTokenFilterName["IndicNormalization"] = "indic_normalization";
+ /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
+ KnownTokenFilterName["KeywordRepeat"] = "keyword_repeat";
+ /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
+ KnownTokenFilterName["KStem"] = "kstem";
+ /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
+ KnownTokenFilterName["Length"] = "length";
+ /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
+ KnownTokenFilterName["Limit"] = "limit";
+ /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
+ KnownTokenFilterName["Lowercase"] = "lowercase";
+ /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
+ KnownTokenFilterName["NGram"] = "nGram_v2";
+ /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
+ KnownTokenFilterName["PersianNormalization"] = "persian_normalization";
+ /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
+ KnownTokenFilterName["Phonetic"] = "phonetic";
+ /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
+ KnownTokenFilterName["PorterStem"] = "porter_stem";
+ /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
+ KnownTokenFilterName["Reverse"] = "reverse";
+ /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
+ KnownTokenFilterName["ScandinavianNormalization"] = "scandinavian_normalization";
+ /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
+ KnownTokenFilterName["ScandinavianFoldingNormalization"] = "scandinavian_folding";
+ /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
+ KnownTokenFilterName["Shingle"] = "shingle";
+ /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
+ KnownTokenFilterName["Snowball"] = "snowball";
+ /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
+ KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
+ /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
+ KnownTokenFilterName["Stemmer"] = "stemmer";
+ /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
+ KnownTokenFilterName["Stopwords"] = "stopwords";
+ /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
+ KnownTokenFilterName["Trim"] = "trim";
+ /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
+ KnownTokenFilterName["Truncate"] = "truncate";
+ /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
+ KnownTokenFilterName["Unique"] = "unique";
+ /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
+ KnownTokenFilterName["Uppercase"] = "uppercase";
+ /** Splits words into subwords and performs optional transformations on subword groups. */
+ KnownTokenFilterName["WordDelimiter"] = "word_delimiter";
+ })(KnownTokenFilterName || (KnownTokenFilterName = {}));
+ /** Known values of {@link CharFilterName} that the service accepts. */
+ export var KnownCharFilterName;
+ (function (KnownCharFilterName) {
+ /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
+ KnownCharFilterName["HtmlStrip"] = "html_strip";
+ })(KnownCharFilterName || (KnownCharFilterName = {}));
  /** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */
  export var KnownVectorSearchAlgorithmMetric;
  (function (KnownVectorSearchAlgorithmMetric) {
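These built-in token filter and char filter names (relocated in this file; the corresponding removal from their old position appears further down) plug directly into custom analyzer definitions. A minimal sketch using names from the lists above:

import type { SearchIndex } from "@azure/search-documents";

// A custom analyzer that strips HTML, tokenizes with the standard tokenizer,
// then lowercases and ASCII-folds the tokens.
const indexWithAnalyzer: Pick<SearchIndex, "analyzers"> = {
  analyzers: [
    {
      odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
      name: "html_text_analyzer",
      tokenizerName: "standard_v2",
      tokenFilters: ["lowercase", "asciifolding"],
      charFilters: ["html_strip"],
    },
  ],
};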
@@ -365,6 +477,22 @@ export var KnownAzureOpenAIModelName;
  /** TextEmbedding3Small */
  KnownAzureOpenAIModelName["TextEmbedding3Small"] = "text-embedding-3-small";
  })(KnownAzureOpenAIModelName || (KnownAzureOpenAIModelName = {}));
+ /** Known values of {@link AIStudioModelCatalogName} that the service accepts. */
+ export var KnownAIStudioModelCatalogName;
+ (function (KnownAIStudioModelCatalogName) {
+ /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */
+ KnownAIStudioModelCatalogName["OpenAIClipImageTextEmbeddingsVitBasePatch32"] = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32";
+ /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */
+ KnownAIStudioModelCatalogName["OpenAIClipImageTextEmbeddingsViTLargePatch14336"] = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336";
+ /** FacebookDinoV2ImageEmbeddingsViTBase */
+ KnownAIStudioModelCatalogName["FacebookDinoV2ImageEmbeddingsViTBase"] = "Facebook-DinoV2-Image-Embeddings-ViT-Base";
+ /** FacebookDinoV2ImageEmbeddingsViTGiant */
+ KnownAIStudioModelCatalogName["FacebookDinoV2ImageEmbeddingsViTGiant"] = "Facebook-DinoV2-Image-Embeddings-ViT-Giant";
+ /** CohereEmbedV3English */
+ KnownAIStudioModelCatalogName["CohereEmbedV3English"] = "Cohere-embed-v3-english";
+ /** CohereEmbedV3Multilingual */
+ KnownAIStudioModelCatalogName["CohereEmbedV3Multilingual"] = "Cohere-embed-v3-multilingual";
+ })(KnownAIStudioModelCatalogName || (KnownAIStudioModelCatalogName = {}));
  /** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */
  export var KnownKeyPhraseExtractionSkillLanguage;
  (function (KnownKeyPhraseExtractionSkillLanguage) {
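These catalog names identify Azure AI Studio embedding models that the new "aml" vectorizer kind can reference. A sketch as a plain object, since only the kind and model-name strings come from this diff; the surrounding parameter names follow the preview REST API and are assumptions:

// Plain object mirroring the preview REST payload; amlParameters/resourceId/modelName
// are assumed names, while "aml" and "Cohere-embed-v3-english" come from this diff.
const amlVectorizer = {
  name: "catalog-embedding-vectorizer",
  kind: "aml",
  amlParameters: {
    resourceId: "<azure-ml-online-endpoint-resource-id>",
    modelName: "Cohere-embed-v3-english",
  },
};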
@@ -1079,6 +1207,26 @@ export var KnownTextSplitMode;
  /** Split the text into individual sentences. */
  KnownTextSplitMode["Sentences"] = "sentences";
  })(KnownTextSplitMode || (KnownTextSplitMode = {}));
+ /** Known values of {@link SplitSkillUnit} that the service accepts. */
+ export var KnownSplitSkillUnit;
+ (function (KnownSplitSkillUnit) {
+ /** The length will be measured by character. */
+ KnownSplitSkillUnit["Characters"] = "characters";
+ /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */
+ KnownSplitSkillUnit["AzureOpenAITokens"] = "azureOpenAITokens";
+ })(KnownSplitSkillUnit || (KnownSplitSkillUnit = {}));
+ /** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */
+ export var KnownSplitSkillEncoderModelName;
+ (function (KnownSplitSkillEncoderModelName) {
+ /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
+ KnownSplitSkillEncoderModelName["R50KBase"] = "r50k_base";
+ /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
+ KnownSplitSkillEncoderModelName["P50KBase"] = "p50k_base";
+ /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
+ KnownSplitSkillEncoderModelName["P50KEdit"] = "p50k_edit";
+ /** A base model with a 100,000 token vocabulary. */
+ KnownSplitSkillEncoderModelName["CL100KBase"] = "cl100k_base";
+ })(KnownSplitSkillEncoderModelName || (KnownSplitSkillEncoderModelName = {}));
  /** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */
  export var KnownCustomEntityLookupSkillLanguage;
  (function (KnownCustomEntityLookupSkillLanguage) {
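Together, SplitSkillUnit and SplitSkillEncoderModelName let the Text Split skill measure chunk length in Azure OpenAI tokens instead of characters. A sketch of such a skill definition as a plain object; only the unit and encoder values come from this diff, and the unit/azureOpenAITokenizerParameters property names are assumptions from the preview REST API:

// Plain object in the shape of a skillset skill; preview-only property names are assumptions.
const splitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  textSplitMode: "pages",
  maximumPageLength: 512,
  unit: "azureOpenAITokens",
  azureOpenAITokenizerParameters: { encoderModelName: "cl100k_base" },
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "chunks" }],
};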
@@ -1279,84 +1427,6 @@ export var KnownLexicalTokenizerName;
  /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
  KnownLexicalTokenizerName["Whitespace"] = "whitespace";
  })(KnownLexicalTokenizerName || (KnownLexicalTokenizerName = {}));
- /** Known values of {@link TokenFilterName} that the service accepts. */
- export var KnownTokenFilterName;
- (function (KnownTokenFilterName) {
- /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
- KnownTokenFilterName["ArabicNormalization"] = "arabic_normalization";
- /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
- KnownTokenFilterName["Apostrophe"] = "apostrophe";
- /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
- KnownTokenFilterName["AsciiFolding"] = "asciifolding";
- /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
- KnownTokenFilterName["CjkBigram"] = "cjk_bigram";
- /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
- KnownTokenFilterName["CjkWidth"] = "cjk_width";
- /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
- KnownTokenFilterName["Classic"] = "classic";
- /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
- KnownTokenFilterName["CommonGram"] = "common_grams";
- /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
- KnownTokenFilterName["EdgeNGram"] = "edgeNGram_v2";
- /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
- KnownTokenFilterName["Elision"] = "elision";
- /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
- KnownTokenFilterName["GermanNormalization"] = "german_normalization";
- /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
- KnownTokenFilterName["HindiNormalization"] = "hindi_normalization";
- /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
- KnownTokenFilterName["IndicNormalization"] = "indic_normalization";
- /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
- KnownTokenFilterName["KeywordRepeat"] = "keyword_repeat";
- /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
- KnownTokenFilterName["KStem"] = "kstem";
- /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
- KnownTokenFilterName["Length"] = "length";
- /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
- KnownTokenFilterName["Limit"] = "limit";
- /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
- KnownTokenFilterName["Lowercase"] = "lowercase";
- /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
- KnownTokenFilterName["NGram"] = "nGram_v2";
- /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
- KnownTokenFilterName["PersianNormalization"] = "persian_normalization";
- /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
- KnownTokenFilterName["Phonetic"] = "phonetic";
- /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
- KnownTokenFilterName["PorterStem"] = "porter_stem";
- /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
- KnownTokenFilterName["Reverse"] = "reverse";
- /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
- KnownTokenFilterName["ScandinavianNormalization"] = "scandinavian_normalization";
- /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
- KnownTokenFilterName["ScandinavianFoldingNormalization"] = "scandinavian_folding";
- /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
- KnownTokenFilterName["Shingle"] = "shingle";
- /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
- KnownTokenFilterName["Snowball"] = "snowball";
- /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
- KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
- /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
- KnownTokenFilterName["Stemmer"] = "stemmer";
- /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
- KnownTokenFilterName["Stopwords"] = "stopwords";
- /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
- KnownTokenFilterName["Trim"] = "trim";
- /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
- KnownTokenFilterName["Truncate"] = "truncate";
- /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
- KnownTokenFilterName["Unique"] = "unique";
- /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
- KnownTokenFilterName["Uppercase"] = "uppercase";
- /** Splits words into subwords and performs optional transformations on subword groups. */
- KnownTokenFilterName["WordDelimiter"] = "word_delimiter";
- })(KnownTokenFilterName || (KnownTokenFilterName = {}));
- /** Known values of {@link CharFilterName} that the service accepts. */
- export var KnownCharFilterName;
- (function (KnownCharFilterName) {
- /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
- KnownCharFilterName["HtmlStrip"] = "html_strip";
- })(KnownCharFilterName || (KnownCharFilterName = {}));
  /** Known values of {@link RegexFlags} that the service accepts. */
  export var KnownRegexFlags;
  (function (KnownRegexFlags) {