@azure/search-documents 12.1.0 → 12.2.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. package/README.md +1 -1
  2. package/dist/index.js +2710 -454
  3. package/dist/index.js.map +1 -1
  4. package/dist-esm/src/base64.browser.js +1 -1
  5. package/dist-esm/src/base64.browser.js.map +1 -1
  6. package/dist-esm/src/base64.js +1 -1
  7. package/dist-esm/src/base64.js.map +1 -1
  8. package/dist-esm/src/errorModels.js +1 -1
  9. package/dist-esm/src/errorModels.js.map +1 -1
  10. package/dist-esm/src/generated/data/models/index.js +220 -6
  11. package/dist-esm/src/generated/data/models/index.js.map +1 -1
  12. package/dist-esm/src/generated/data/models/mappers.js +481 -0
  13. package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
  14. package/dist-esm/src/generated/data/models/parameters.js +51 -0
  15. package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
  16. package/dist-esm/src/generated/data/operations/documents.js +5 -0
  17. package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
  18. package/dist-esm/src/generated/data/searchClient.js +1 -1
  19. package/dist-esm/src/generated/data/searchClient.js.map +1 -1
  20. package/dist-esm/src/generated/service/models/index.js +210 -84
  21. package/dist-esm/src/generated/service/models/index.js.map +1 -1
  22. package/dist-esm/src/generated/service/models/mappers.js +815 -77
  23. package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
  24. package/dist-esm/src/generated/service/models/parameters.js +51 -1
  25. package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
  26. package/dist-esm/src/generated/service/operations/aliases.js +160 -0
  27. package/dist-esm/src/generated/service/operations/aliases.js.map +1 -0
  28. package/dist-esm/src/generated/service/operations/dataSources.js +4 -1
  29. package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
  30. package/dist-esm/src/generated/service/operations/index.js +1 -0
  31. package/dist-esm/src/generated/service/operations/index.js.map +1 -1
  32. package/dist-esm/src/generated/service/operations/indexers.js +29 -1
  33. package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
  34. package/dist-esm/src/generated/service/operations/skillsets.js +30 -1
  35. package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
  36. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js +9 -0
  37. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +1 -0
  38. package/dist-esm/src/generated/service/operationsInterfaces/index.js +1 -0
  39. package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +1 -1
  40. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
  41. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  42. package/dist-esm/src/generated/service/searchServiceClient.js +3 -2
  43. package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
  44. package/dist-esm/src/geographyPoint.js +1 -1
  45. package/dist-esm/src/geographyPoint.js.map +1 -1
  46. package/dist-esm/src/index.js +4 -4
  47. package/dist-esm/src/index.js.map +1 -1
  48. package/dist-esm/src/indexDocumentsBatch.js +1 -1
  49. package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
  50. package/dist-esm/src/indexModels.js +1 -1
  51. package/dist-esm/src/indexModels.js.map +1 -1
  52. package/dist-esm/src/logger.js +1 -1
  53. package/dist-esm/src/logger.js.map +1 -1
  54. package/dist-esm/src/odata.js +1 -1
  55. package/dist-esm/src/odata.js.map +1 -1
  56. package/dist-esm/src/odataMetadataPolicy.js +1 -1
  57. package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
  58. package/dist-esm/src/searchApiKeyCredentialPolicy.js +1 -1
  59. package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
  60. package/dist-esm/src/searchAudience.js +1 -1
  61. package/dist-esm/src/searchAudience.js.map +1 -1
  62. package/dist-esm/src/searchClient.js +52 -8
  63. package/dist-esm/src/searchClient.js.map +1 -1
  64. package/dist-esm/src/searchIndexClient.js +158 -6
  65. package/dist-esm/src/searchIndexClient.js.map +1 -1
  66. package/dist-esm/src/searchIndexerClient.js +53 -2
  67. package/dist-esm/src/searchIndexerClient.js.map +1 -1
  68. package/dist-esm/src/searchIndexingBufferedSender.js +1 -1
  69. package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
  70. package/dist-esm/src/serialization.js +1 -1
  71. package/dist-esm/src/serialization.js.map +1 -1
  72. package/dist-esm/src/serviceModels.js +1 -1
  73. package/dist-esm/src/serviceModels.js.map +1 -1
  74. package/dist-esm/src/serviceUtils.js +112 -26
  75. package/dist-esm/src/serviceUtils.js.map +1 -1
  76. package/dist-esm/src/synonymMapHelper.browser.js +1 -1
  77. package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
  78. package/dist-esm/src/synonymMapHelper.js +1 -1
  79. package/dist-esm/src/synonymMapHelper.js.map +1 -1
  80. package/dist-esm/src/tracing.js +1 -1
  81. package/dist-esm/src/tracing.js.map +1 -1
  82. package/dist-esm/src/walk.js +1 -1
  83. package/dist-esm/src/walk.js.map +1 -1
  84. package/package.json +13 -16
  85. package/types/search-documents.d.ts +1774 -108
@@ -1,12 +1,74 @@
1
1
  /// <reference lib="esnext.asynciterable" />
2
2
 
3
3
  import { AzureKeyCredential } from '@azure/core-auth';
4
- import { ExtendedCommonClientOptions } from '@azure/core-http-compat';
5
- import { KeyCredential } from '@azure/core-auth';
6
- import { OperationOptions } from '@azure/core-client';
7
- import { PagedAsyncIterableIterator } from '@azure/core-paging';
8
- import { RestError } from '@azure/core-rest-pipeline';
9
- import { TokenCredential } from '@azure/core-auth';
4
+ import type { ExtendedCommonClientOptions } from '@azure/core-http-compat';
5
+ import type { KeyCredential } from '@azure/core-auth';
6
+ import type { OperationOptions } from '@azure/core-client';
7
+ import type { PagedAsyncIterableIterator } from '@azure/core-paging';
8
+ import type { Pipeline } from '@azure/core-rest-pipeline';
9
+ import type { RestError } from '@azure/core-rest-pipeline';
10
+ import type { TokenCredential } from '@azure/core-auth';
11
+
12
+ /** The multi-region account of an Azure AI service resource that's attached to a skillset. */
13
+ export declare interface AIServicesAccountIdentity extends BaseCognitiveServicesAccount {
14
+ /** Polymorphic discriminator, which specifies the different types this object can be */
15
+ odatatype: "#Microsoft.Azure.Search.AIServicesByIdentity";
16
+ /** The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
17
+ identity?: SearchIndexerDataIdentity;
18
+ /** The subdomain url for the corresponding AI Service. */
19
+ subdomainUrl: string;
20
+ }
21
+
22
+ /** The account key of an Azure AI service resource that's attached to a skillset, to be used with the resource's subdomain. */
23
+ export declare interface AIServicesAccountKey extends BaseCognitiveServicesAccount {
24
+ /** Polymorphic discriminator, which specifies the different types this object can be */
25
+ odatatype: "#Microsoft.Azure.Search.AIServicesByKey";
26
+ /** The key used to provision the Azure AI service resource attached to a skillset. */
27
+ key: string;
28
+ /** The subdomain url for the corresponding AI Service. */
29
+ subdomainUrl: string;
30
+ }
31
+
32
+ /** Specifies the AI Services Vision parameters for vectorizing a query image or text. */
33
+ export declare interface AIServicesVisionParameters {
34
+ /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */
35
+ modelVersion?: string;
36
+ /** The resource URI of the AI Services resource. */
37
+ resourceUri: string;
38
+ /** API key of the designated AI Services resource. */
39
+ apiKey?: string;
40
+ /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
41
+ authIdentity?: SearchIndexerDataIdentity;
42
+ }
43
+
44
+ /** Specifies the AI Services Vision parameters for vectorizing a query image or text. */
45
+ export declare interface AIServicesVisionVectorizer extends BaseVectorSearchVectorizer {
46
+ /** Polymorphic discriminator, which specifies the different types this object can be */
47
+ kind: "aiServicesVision";
48
+ /** Contains the parameters specific to AI Services Vision embedding vectorization. */
49
+ parameters?: AIServicesVisionParameters;
50
+ }
51
+
52
+ /**
53
+ * Defines values for AIStudioModelCatalogName. \
54
+ * {@link KnownAIStudioModelCatalogName} can be used interchangeably with AIStudioModelCatalogName,
55
+ * this enum contains the known values that the service supports.
56
+ * ### Known values supported by the service
57
+ * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32** \
58
+ * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336** \
59
+ * **Facebook-DinoV2-Image-Embeddings-ViT-Base** \
60
+ * **Facebook-DinoV2-Image-Embeddings-ViT-Giant** \
61
+ * **Cohere-embed-v3-english** \
62
+ * **Cohere-embed-v3-multilingual**
63
+ */
64
+ export declare type AIStudioModelCatalogName = string;
65
+
66
+ /**
67
+ * An iterator for listing the aliases that exist in the Search service. This will make requests
68
+ * as needed during iteration. Use .byPage() to make one request to the server
69
+ * per iteration.
70
+ */
71
+ export declare type AliasIterator = PagedAsyncIterableIterator<SearchIndexAlias, SearchIndexAlias[], {}>;
10
72
 
11
73
  /** Information about a token returned by an analyzer. */
12
74
  export declare interface AnalyzedTokenInfo {
@@ -54,6 +116,11 @@ export declare interface AnalyzeRequest {
54
116
  * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.
55
117
  */
56
118
  tokenizerName?: LexicalTokenizerName;
119
+ /**
120
+ * The name of the normalizer to use to normalize the given text. {@link KnownNormalizerNames} is
121
+ * an enum containing built-in analyzer names.
122
+ */
123
+ normalizerName?: LexicalNormalizerName;
57
124
  /**
58
125
  * An optional list of token filters to use when breaking the given text. This parameter can only
59
126
  * be set when using the tokenizer parameter.
@@ -183,6 +250,35 @@ export declare interface AzureActiveDirectoryApplicationCredentials {
183
250
 
184
251
  export { AzureKeyCredential }
185
252
 
253
+ /** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */
254
+ export declare interface AzureMachineLearningSkill extends BaseSearchIndexerSkill {
255
+ /** Polymorphic discriminator, which specifies the different types this object can be */
256
+ odatatype: "#Microsoft.Skills.Custom.AmlSkill";
257
+ /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
258
+ scoringUri?: string;
259
+ /** (Required for key authentication) The key for the AML service. */
260
+ authenticationKey?: string;
261
+ /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */
262
+ resourceId?: string;
263
+ /** (Optional) When specified, indicates the timeout for the http client making the API call. */
264
+ timeout?: string;
265
+ /** (Optional for token authentication). The region the AML service is deployed in. */
266
+ region?: string;
267
+ /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */
268
+ degreeOfParallelism?: number;
269
+ }
270
+
271
+ /** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog for generating the vector embedding of a query string. */
272
+ export declare interface AzureMachineLearningVectorizer extends BaseVectorSearchVectorizer {
273
+ /** Polymorphic discriminator, which specifies the different types this object can be */
274
+ kind: "aml";
275
+ /** Specifies the properties of the AML vectorizer. */
276
+ amlParameters?: AzureMachineLearningVectorizerParameters;
277
+ }
278
+
279
+ /** Specifies the properties for connecting to an AML vectorizer. */
280
+ export declare type AzureMachineLearningVectorizerParameters = NoAuthAzureMachineLearningVectorizerParameters | KeyAuthAzureMachineLearningVectorizerParameters | TokenAuthAzureMachineLearningVectorizerParameters;
281
+
186
282
  /** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */
187
283
  export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill, AzureOpenAIParameters {
188
284
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -202,13 +298,13 @@ export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkil
202
298
  */
203
299
  export declare type AzureOpenAIModelName = string;
204
300
 
205
- /** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */
301
+ /** Specifies the parameters for connecting to the Azure OpenAI resource. */
206
302
  export declare interface AzureOpenAIParameters {
207
- /** The resource uri for your Azure Open AI resource. */
303
+ /** The resource URI of the Azure OpenAI resource. */
208
304
  resourceUrl?: string;
209
- /** ID of your Azure Open AI model deployment on the designated resource. */
305
+ /** ID of the Azure OpenAI model deployment on the designated resource. */
210
306
  deploymentId?: string;
211
- /** API key for the designated Azure Open AI resource. */
307
+ /** API key of the designated Azure OpenAI resource. */
212
308
  apiKey?: string;
213
309
  /** The user-assigned managed identity used for outbound connections. */
214
310
  authIdentity?: SearchIndexerDataIdentity;
@@ -216,6 +312,13 @@ export declare interface AzureOpenAIParameters {
216
312
  modelName?: AzureOpenAIModelName;
217
313
  }
218
314
 
315
+ export declare interface AzureOpenAITokenizerParameters {
316
+ /** Only applies if the unit is set to azureOpenAITokens. Options include 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. */
317
+ encoderModelName?: SplitSkillEncoderModelName;
318
+ /** (Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process. */
319
+ allowedSpecialTokens?: string[];
320
+ }
321
+
219
322
  /** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */
220
323
  export declare interface AzureOpenAIVectorizer extends BaseVectorSearchVectorizer {
221
324
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -224,6 +327,14 @@ export declare interface AzureOpenAIVectorizer extends BaseVectorSearchVectorize
224
327
  parameters?: AzureOpenAIParameters;
225
328
  }
226
329
 
330
+ /** Specifies the properties common between all AML vectorizer auth types. */
331
+ export declare interface BaseAzureMachineLearningVectorizerParameters {
332
+ /** When specified, indicates the timeout for the http client making the API call. */
333
+ timeout?: string;
334
+ /** The name of the embedding model from the Azure AI Studio Catalog that is deployed at the provided endpoint. */
335
+ modelName?: AIStudioModelCatalogName;
336
+ }
337
+
227
338
  /** Base type for character filters. */
228
339
  export declare interface BaseCharFilter {
229
340
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -235,7 +346,7 @@ export declare interface BaseCharFilter {
235
346
  /** Base type for describing any Azure AI service resource attached to a skillset. */
236
347
  export declare interface BaseCognitiveServicesAccount {
237
348
  /** Polymorphic discriminator, which specifies the different types this object can be */
238
- odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey";
349
+ odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey" | "#Microsoft.Azure.Search.AIServicesByKey" | "#Microsoft.Azure.Search.AIServicesByIdentity";
239
350
  /** Description of the Azure AI service resource attached to a skillset. */
240
351
  description?: string;
241
352
  }
@@ -249,7 +360,7 @@ export declare interface BaseDataChangeDetectionPolicy {
249
360
  /** Base type for data deletion detection policies. */
250
361
  export declare interface BaseDataDeletionDetectionPolicy {
251
362
  /** Polymorphic discriminator, which specifies the different types this object can be */
252
- odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
363
+ odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
253
364
  }
254
365
 
255
366
  /** Base type for analyzers. */
@@ -260,6 +371,14 @@ export declare interface BaseLexicalAnalyzer {
260
371
  name: string;
261
372
  }
262
373
 
374
+ /** Base type for normalizers. */
375
+ export declare interface BaseLexicalNormalizer {
376
+ /** Polymorphic discriminator, which specifies the different types this object can be */
377
+ odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
378
+ /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */
379
+ name: string;
380
+ }
381
+
263
382
  /** Base type for tokenizers. */
264
383
  export declare interface BaseLexicalTokenizer {
265
384
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -289,7 +408,7 @@ export declare interface BaseSearchIndexerDataIdentity {
289
408
  /** Base type for skills. */
290
409
  export declare interface BaseSearchIndexerSkill {
291
410
  /** Polymorphic discriminator, which specifies the different types this object can be */
292
- odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
411
+ odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Custom.AmlSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" | "#Microsoft.Skills.Vision.VectorizeSkill";
293
412
  /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */
294
413
  name?: string;
295
414
  /** The description of the skill which describes the inputs, outputs, and usage of the skill. */
@@ -374,6 +493,14 @@ export declare interface BaseSearchRequestOptions<TModel extends object, TFields
374
493
  * fielded search expression take precedence over any field names listed in this parameter.
375
494
  */
376
495
  searchFields?: SearchFieldArray<TModel>;
496
+ /**
497
+ * The language of the query.
498
+ */
499
+ queryLanguage?: QueryLanguage;
500
+ /**
501
+ * Improve search recall by spell-correcting individual search query terms.
502
+ */
503
+ speller?: QuerySpeller;
377
504
  /**
378
505
  * A value that specifies whether any or all of the search terms must be matched in order to
379
506
  * count the document as a match. Possible values include: 'any', 'all'
@@ -415,6 +542,8 @@ export declare interface BaseSearchRequestOptions<TModel extends object, TFields
415
542
  * Defines options for vector search queries
416
543
  */
417
544
  vectorSearchOptions?: VectorSearchOptions<TModel>;
545
+ /** The query parameters to configure hybrid search behaviors. */
546
+ hybridSearch?: HybridSearchOptions;
418
547
  }
419
548
 
420
549
  /** Base type for token filters. */
@@ -431,6 +560,8 @@ export declare interface BaseVectorQuery<TModel extends object> {
431
560
  * ### Known values supported by the service
432
561
  * **vector**: Vector query where a raw vector value is provided.
433
562
  * **text**: Vector query where a text value that needs to be vectorized is provided.
563
+ * **imageUrl**: Vector query where a URL that represents an image value that needs to be vectorized is provided.
564
+ * **imageBinary**: Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided.
434
565
  */
435
566
  kind: VectorQueryKind;
436
567
  /** Number of nearest neighbors to return as top hits. */
@@ -452,6 +583,11 @@ export declare interface BaseVectorQuery<TModel extends object> {
452
583
  oversampling?: number;
453
584
  /** Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */
454
585
  weight?: number;
586
+ /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. */
587
+ threshold?: VectorThreshold;
588
+ /** The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in
589
+ * the top level filter parameter is used instead. */
590
+ filterOverride?: string;
455
591
  }
456
592
 
457
593
  /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
@@ -472,6 +608,10 @@ export declare interface BaseVectorSearchCompression {
472
608
  rerankWithOriginalVectors?: boolean;
473
609
  /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */
474
610
  defaultOversampling?: number;
611
+ /** Contains the options for rescoring. */
612
+ rescoringOptions?: RescoringOptions;
613
+ /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should be only used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */
614
+ truncationDimension?: number;
475
615
  }
476
616
 
477
617
  /** Contains specific details for a vectorization method to be used during query time. */
@@ -482,6 +622,12 @@ export declare interface BaseVectorSearchVectorizer {
482
622
  vectorizerName: string;
483
623
  }
484
624
 
625
+ /** The threshold used for vector queries. */
626
+ export declare interface BaseVectorThreshold {
627
+ /** Polymorphic discriminator, which specifies the different types this object can be */
628
+ kind: "vectorSimilarity" | "searchScore";
629
+ }
630
+
485
631
  /** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */
486
632
  export declare interface BinaryQuantizationCompression extends BaseVectorSearchCompression {
487
633
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -550,7 +696,7 @@ export declare interface ClassicTokenizer extends BaseLexicalTokenizer {
550
696
  /**
551
697
  * Contains the possible cases for CognitiveServicesAccount.
552
698
  */
553
- export declare type CognitiveServicesAccount = DefaultCognitiveServicesAccount | CognitiveServicesAccountKey;
699
+ export declare type CognitiveServicesAccount = DefaultCognitiveServicesAccount | CognitiveServicesAccountKey | AIServicesAccountKey | AIServicesAccountIdentity;
554
700
 
555
701
  /** The multi-region account key of an Azure AI service resource that's attached to a skillset. */
556
702
  export declare interface CognitiveServicesAccountKey extends BaseCognitiveServicesAccount {
@@ -619,6 +765,11 @@ export declare interface CorsOptions {
619
765
  */
620
766
  export declare type CountDocumentsOptions = OperationOptions;
621
767
 
768
+ /**
769
+ * Options for create alias operation.
770
+ */
771
+ export declare type CreateAliasOptions = OperationOptions;
772
+
622
773
  /**
623
774
  * Options for create datasource operation.
624
775
  */
@@ -634,6 +785,16 @@ export declare type CreateIndexerOptions = OperationOptions;
634
785
  */
635
786
  export declare type CreateIndexOptions = OperationOptions;
636
787
 
788
+ /**
789
+ * Options for create or update alias operation.
790
+ */
791
+ export declare interface CreateOrUpdateAliasOptions extends OperationOptions {
792
+ /**
793
+ * If set to true, Resource will be deleted only if the etag matches.
794
+ */
795
+ onlyIfUnchanged?: boolean;
796
+ }
797
+
637
798
  /**
638
799
  * Options for create/update datasource operation.
639
800
  */
@@ -642,6 +803,10 @@ export declare interface CreateorUpdateDataSourceConnectionOptions extends Opera
642
803
  * If set to true, Resource will be updated only if the etag matches.
643
804
  */
644
805
  onlyIfUnchanged?: boolean;
806
+ /**
807
+ * Ignores cache reset requirements.
808
+ */
809
+ skipIndexerResetRequirementForCache?: boolean;
645
810
  }
646
811
 
647
812
  /**
@@ -652,6 +817,10 @@ export declare interface CreateorUpdateIndexerOptions extends OperationOptions {
652
817
  * If set to true, Resource will be updated only if the etag matches.
653
818
  */
654
819
  onlyIfUnchanged?: boolean;
820
+ /** Ignores cache reset requirements. */
821
+ skipIndexerResetRequirementForCache?: boolean;
822
+ /** Disables cache reprocessing change detection. */
823
+ disableCacheReprocessingChangeDetection?: boolean;
655
824
  }
656
825
 
657
826
  /**
@@ -679,6 +848,14 @@ export declare interface CreateOrUpdateSkillsetOptions extends OperationOptions
679
848
  * If set to true, Resource will be updated only if the etag matches.
680
849
  */
681
850
  onlyIfUnchanged?: boolean;
851
+ /**
852
+ * Ignores cache reset requirements.
853
+ */
854
+ skipIndexerResetRequirementForCache?: boolean;
855
+ /**
856
+ * Disables cache reprocessing change detection.
857
+ */
858
+ disableCacheReprocessingChangeDetection?: boolean;
682
859
  }
683
860
 
684
861
  /**
@@ -805,6 +982,16 @@ export declare interface CustomEntityLookupSkill extends BaseSearchIndexerSkill
805
982
 
806
983
  export declare type CustomEntityLookupSkillLanguage = `${KnownCustomEntityLookupSkillLanguage}`;
807
984
 
985
+ /** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */
986
+ export declare interface CustomNormalizer extends BaseLexicalNormalizer {
987
+ /** Polymorphic discriminator, which specifies the different types this object can be */
988
+ odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
989
+ /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
990
+ tokenFilters?: TokenFilterName[];
991
+ /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
992
+ charFilters?: CharFilterName[];
993
+ }
994
+
808
995
  /**
809
996
  * Contains the possible cases for DataChangeDetectionPolicy.
810
997
  */
@@ -813,7 +1000,16 @@ export declare type DataChangeDetectionPolicy = HighWaterMarkChangeDetectionPoli
813
1000
  /**
814
1001
  * Contains the possible cases for DataDeletionDetectionPolicy.
815
1002
  */
816
- export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;
1003
+ export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy | NativeBlobSoftDeleteDeletionDetectionPolicy;
1004
+
1005
+ /** Contains debugging information that can be used to further explore your search results. */
1006
+ export declare interface DebugInfo {
1007
+ /**
1008
+ * Contains debugging information specific to query rewrites.
1009
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1010
+ */
1011
+ readonly queryRewrites?: QueryRewritesDebugInfo;
1012
+ }
817
1013
 
818
1014
  /**
819
1015
  * Default Batch Size
@@ -836,6 +1032,16 @@ export declare interface DefaultCognitiveServicesAccount extends BaseCognitiveSe
836
1032
  odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
837
1033
  }
838
1034
 
1035
+ /**
1036
+ * Options for delete alias operation.
1037
+ */
1038
+ export declare interface DeleteAliasOptions extends OperationOptions {
1039
+ /**
1040
+ * If set to true, Resource will be deleted only if the etag matches.
1041
+ */
1042
+ onlyIfUnchanged?: boolean;
1043
+ }
1044
+
839
1045
  /**
840
1046
  * Options for delete datasource operation.
841
1047
  */
@@ -923,6 +1129,20 @@ export declare interface DistanceScoringParameters {
923
1129
  boostingDistance: number;
924
1130
  }
925
1131
 
1132
+ /** Contains debugging information that can be used to further explore your search results. */
1133
+ export declare interface DocumentDebugInfo {
1134
+ /**
1135
+ * Contains debugging information specific to semantic search queries.
1136
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1137
+ */
1138
+ readonly semantic?: SemanticDebugInfo;
1139
+ /**
1140
+ * Contains debugging information specific to vector and hybrid search.
1141
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1142
+ */
1143
+ readonly vectors?: VectorsDebugInfo;
1144
+ }
1145
+
926
1146
  /** A skill that extracts content from a file within the enrichment pipeline. */
927
1147
  export declare interface DocumentExtractionSkill extends BaseSearchIndexerSkill {
928
1148
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -937,6 +1157,39 @@ export declare interface DocumentExtractionSkill extends BaseSearchIndexerSkill
937
1157
  };
938
1158
  }
939
1159
 
1160
+ /** A skill that extracts content and layout information (as markdown), via Azure AI Services, from files within the enrichment pipeline. */
1161
+ export declare interface DocumentIntelligenceLayoutSkill extends BaseSearchIndexerSkill {
1162
+ /** Polymorphic discriminator, which specifies the different types this object can be */
1163
+ odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill";
1164
+ /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */
1165
+ outputMode?: DocumentIntelligenceLayoutSkillOutputMode;
1166
+ /** The depth of headers in the markdown output. Default is h6. */
1167
+ markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;
1168
+ }
1169
+
1170
+ /**
1171
+ * Defines values for DocumentIntelligenceLayoutSkillMarkdownHeaderDepth. \
1172
+ * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth,
1173
+ * this enum contains the known values that the service supports.
1174
+ * ### Known values supported by the service
1175
+ * **h1**: Header level 1. \
1176
+ * **h2**: Header level 2. \
1177
+ * **h3**: Header level 3. \
1178
+ * **h4**: Header level 4. \
1179
+ * **h5**: Header level 5. \
1180
+ * **h6**: Header level 6.
1181
+ */
1182
+ export declare type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string;
1183
+
1184
+ /**
1185
+ * Defines values for DocumentIntelligenceLayoutSkillOutputMode. \
1186
+ * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode,
1187
+ * this enum contains the known values that the service supports.
1188
+ * ### Known values supported by the service
1189
+ * **oneToMany**: Specify the deepest markdown header section to parse.
1190
+ */
1191
+ export declare type DocumentIntelligenceLayoutSkillOutputMode = string;
1192
+
940
1193
  /**
941
1194
  * Generates n-grams of the given size(s) starting from the front or the back of an input token.
942
1195
  * This token filter is implemented using Apache Lucene.
@@ -1073,12 +1326,20 @@ export declare interface ExtractiveQueryAnswer {
1073
1326
  * The confidence threshold. Default threshold is 0.7
1074
1327
  */
1075
1328
  threshold?: number;
1329
+ /**
1330
+ * An optional upper bound on the number of characters in each answer.
1331
+ */
1332
+ maxAnswerLength?: number;
1076
1333
  }
1077
1334
 
1078
1335
  /** Extracts captions from the matching documents that contain passages relevant to the search query. */
1079
1336
  export declare interface ExtractiveQueryCaption {
1080
1337
  captionType: "extractive";
1081
1338
  highlight?: boolean;
1339
+ /**
1340
+ * An optional upper bound on the number of characters in each caption.
1341
+ */
1342
+ maxCaptionLength?: number;
1082
1343
  }
1083
1344
 
1084
1345
  /** A single bucket of a facet query result. Reports the number of documents with a field value falling within a particular range or having a particular value or interval. */
@@ -1090,6 +1351,13 @@ export declare interface FacetResult {
1090
1351
  * NOTE: This property will not be serialized. It can only be populated by the server.
1091
1352
  */
1092
1353
  readonly count?: number;
1354
+ /**
1355
+ * The nested facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not contain any nested facets.
1356
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1357
+ */
1358
+ readonly facets?: {
1359
+ [propertyName: string]: FacetResult[];
1360
+ };
1093
1361
  }
1094
1362
 
1095
1363
  /** Defines a mapping between a field in a data source and a target field in an index. */
@@ -1126,6 +1394,14 @@ export declare interface FreshnessScoringParameters {
1126
1394
  boostingDuration: string;
1127
1395
  }
1128
1396
 
1397
+ /** Generate alternative query terms to increase the recall of a search request. */
1398
+ export declare interface GenerativeQueryRewrites {
1399
+ /** Polymorphic discriminator, which specifies the different types this object can be */
1400
+ rewritesType: "generative";
1401
+ /** The number of query rewrites to generate. Defaults to 10.*/
1402
+ count?: number;
1403
+ }
1404
+
1129
1405
  /**
1130
1406
  * Represents a geographic point in global coordinates.
1131
1407
  */
@@ -1153,6 +1429,11 @@ export declare class GeographyPoint {
1153
1429
  toJSON(): Record<string, unknown>;
1154
1430
  }
1155
1431
 
1432
+ /**
1433
+ * Options for get alias operation.
1434
+ */
1435
+ export declare type GetAliasOptions = OperationOptions;
1436
+
1156
1437
  /**
1157
1438
  * Options for get datasource operation.
1158
1439
  */
@@ -1257,6 +1538,24 @@ export declare interface HnswParameters {
1257
1538
  metric?: VectorSearchAlgorithmMetric;
1258
1539
  }
1259
1540
 
1541
+ /**
1542
+ * Defines values for HybridCountAndFacetMode. \
1543
+ * {@link KnownHybridCountAndFacetMode} can be used interchangeably with HybridCountAndFacetMode,
1544
+ * this enum contains the known values that the service supports.
1545
+ * ### Known values supported by the service
1546
+ * **countRetrievableResults**: Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. \
1547
+ * **countAllResults**: Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window.
1548
+ */
1549
+ export declare type HybridCountAndFacetMode = string;
1550
+
1551
+ /** TThe query parameters to configure hybrid search behaviors. */
1552
+ export declare interface HybridSearchOptions {
1553
+ /** Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of results. Choosing a larger maxTextRecallSize value will allow retrieving and paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000. */
1554
+ maxTextRecallSize?: number;
1555
+ /** Determines whether the count and facets should includes all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. */
1556
+ countAndFacetMode?: HybridCountAndFacetMode;
1557
+ }
1558
+
1260
1559
  /** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
1261
1560
  export declare interface ImageAnalysisSkill extends BaseSearchIndexerSkill {
1262
1561
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1382,6 +1681,16 @@ export declare interface IndexerExecutionResult {
1382
1681
  * NOTE: This property will not be serialized. It can only be populated by the server.
1383
1682
  */
1384
1683
  readonly status: IndexerExecutionStatus;
1684
+ /**
1685
+ * The outcome of this indexer execution.
1686
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1687
+ */
1688
+ readonly statusDetail?: IndexerExecutionStatusDetail;
1689
+ /**
1690
+ * All of the state that defines and dictates the indexer's current execution.
1691
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1692
+ */
1693
+ readonly currentState?: IndexerState;
1385
1694
  /**
1386
1695
  * The error message indicating the top-level error, if any.
1387
1696
  * NOTE: This property will not be serialized. It can only be populated by the server.
@@ -1432,9 +1741,67 @@ export declare interface IndexerExecutionResult {
1432
1741
  /** Defines values for IndexerExecutionStatus. */
1433
1742
  export declare type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset";
1434
1743
 
1744
+ /**
1745
+ * Defines values for IndexerExecutionStatusDetail. \
1746
+ * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail,
1747
+ * this enum contains the known values that the service supports.
1748
+ * ### Known values supported by the service
1749
+ * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs.
1750
+ */
1751
+ export declare type IndexerExecutionStatusDetail = string;
1752
+
1753
+ /** Represents all of the state that defines and dictates the indexer's current execution. */
1754
+ export declare interface IndexerState {
1755
+ /**
1756
+ * The mode the indexer is running in.
1757
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1758
+ */
1759
+ readonly mode?: IndexingMode;
1760
+ /**
1761
+ * Change tracking state used when indexing starts on all documents in the datasource.
1762
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1763
+ */
1764
+ readonly allDocumentsInitialChangeTrackingState?: string;
1765
+ /**
1766
+ * Change tracking state value when indexing finishes on all documents in the datasource.
1767
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1768
+ */
1769
+ readonly allDocumentsFinalChangeTrackingState?: string;
1770
+ /**
1771
+ * Change tracking state used when indexing starts on select, reset documents in the datasource.
1772
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1773
+ */
1774
+ readonly resetDocumentsInitialChangeTrackingState?: string;
1775
+ /**
1776
+ * Change tracking state value when indexing finishes on select, reset documents in the datasource.
1777
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1778
+ */
1779
+ readonly resetDocumentsFinalChangeTrackingState?: string;
1780
+ /**
1781
+ * The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys.
1782
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1783
+ */
1784
+ readonly resetDocumentKeys?: string[];
1785
+ /**
1786
+ * The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids.
1787
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1788
+ */
1789
+ readonly resetDatasourceDocumentIds?: string[];
1790
+ }
1791
+
1435
1792
  /** Defines values for IndexerStatus. */
1436
1793
  export declare type IndexerStatus = "unknown" | "error" | "running";
1437
1794
 
1795
+ /**
1796
+ * Defines values for IndexingMode. \
1797
+ * {@link KnownIndexingMode} can be used interchangeably with IndexingMode,
1798
+ * this enum contains the known values that the service supports.
1799
+ * ### Known values supported by the service
1800
+ * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \
1801
+ * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status.
1802
+ */
1803
+ export declare type IndexingMode = string;
1804
+
1438
1805
  /** Represents parameters for indexer execution. */
1439
1806
  export declare interface IndexingParameters {
1440
1807
  /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */
@@ -1461,7 +1828,7 @@ export declare interface IndexingParametersConfiguration {
1461
1828
  failOnUnsupportedContentType?: boolean;
1462
1829
  /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */
1463
1830
  failOnUnprocessableDocument?: boolean;
1464
- /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. */
1831
+ /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */
1465
1832
  indexStorageMetadataOnlyForOversizedDocuments?: boolean;
1466
1833
  /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */
1467
1834
  delimitedTextHeaders?: string;
@@ -1469,6 +1836,10 @@ export declare interface IndexingParametersConfiguration {
1469
1836
  delimitedTextDelimiter?: string;
1470
1837
  /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */
1471
1838
  firstLineContainsHeaders?: boolean;
1839
+ /** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */
1840
+ markdownParsingSubmode?: MarkdownParsingSubmode;
1841
+ /** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. */
1842
+ markdownHeaderDepth?: MarkdownHeaderDepth;
1472
1843
  /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */
1473
1844
  documentRoot?: string;
1474
1845
  /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */
@@ -1563,6 +1934,18 @@ export declare interface KeepTokenFilter extends BaseTokenFilter {
1563
1934
  lowerCaseKeepWords?: boolean;
1564
1935
  }
1565
1936
 
1937
+ /**
1938
+ * Specifies the properties for connecting to an AML vectorizer with an authentication key.
1939
+ */
1940
+ export declare interface KeyAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
1941
+ /** Indicates how the service should attempt to identify itself to the AML instance */
1942
+ authKind: "key";
1943
+ /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
1944
+ scoringUri: string;
1945
+ /** The key for the AML service. */
1946
+ authenticationKey: string;
1947
+ }
1948
+
1566
1949
  /** A skill that uses text analytics for key phrase extraction. */
1567
1950
  export declare interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill {
1568
1951
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1608,6 +1991,22 @@ export declare interface KeywordTokenizer {
1608
1991
  maxTokenLength?: number;
1609
1992
  }
1610
1993
 
1994
+ /** Known values of {@link AIStudioModelCatalogName} that the service accepts. */
1995
+ export declare enum KnownAIStudioModelCatalogName {
1996
+ /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */
1997
+ OpenAIClipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32",
1998
+ /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */
1999
+ OpenAIClipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336",
2000
+ /** FacebookDinoV2ImageEmbeddingsViTBase */
2001
+ FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base",
2002
+ /** FacebookDinoV2ImageEmbeddingsViTGiant */
2003
+ FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant",
2004
+ /** CohereEmbedV3English */
2005
+ CohereEmbedV3English = "Cohere-embed-v3-english",
2006
+ /** CohereEmbedV3Multilingual */
2007
+ CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual"
2008
+ }
2009
+
1611
2010
  /**
1612
2011
  * Defines values for AnalyzerName.
1613
2012
  * See https://docs.microsoft.com/rest/api/searchservice/Language-support
@@ -2030,7 +2429,9 @@ export declare enum KnownBlobIndexerParsingMode {
2030
2429
  /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */
2031
2430
  JsonArray = "jsonArray",
2032
2431
  /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */
2033
- JsonLines = "jsonLines"
2432
+ JsonLines = "jsonLines",
2433
+ /** Set to markdown to extract content from markdown files. */
2434
+ Markdown = "markdown"
2034
2435
  }
2035
2436
 
2036
2437
  /** Known values of {@link BlobIndexerPDFTextRotationAlgorithm} that the service accepts. */
@@ -2041,9 +2442,15 @@ export declare enum KnownBlobIndexerPDFTextRotationAlgorithm {
2041
2442
  DetectAngles = "detectAngles"
2042
2443
  }
2043
2444
 
2044
- /** Known values of {@link CharFilterName} that the service accepts. */
2445
+ /**
2446
+ * Defines values for CharFilterName.
2447
+ * @readonly
2448
+ */
2045
2449
  export declare enum KnownCharFilterNames {
2046
- /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
2450
+ /**
2451
+ * A character filter that attempts to strip out HTML constructs. See
2452
+ * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html
2453
+ */
2047
2454
  HtmlStrip = "html_strip"
2048
2455
  }
2049
2456
 
@@ -2069,6 +2476,28 @@ export declare enum KnownCustomEntityLookupSkillLanguage {
2069
2476
  Pt = "pt"
2070
2477
  }
2071
2478
 
2479
+ /** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */
2480
+ export declare enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth {
2481
+ /** Header level 1. */
2482
+ H1 = "h1",
2483
+ /** Header level 2. */
2484
+ H2 = "h2",
2485
+ /** Header level 3. */
2486
+ H3 = "h3",
2487
+ /** Header level 4. */
2488
+ H4 = "h4",
2489
+ /** Header level 5. */
2490
+ H5 = "h5",
2491
+ /** Header level 6. */
2492
+ H6 = "h6"
2493
+ }
2494
+
2495
+ /** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */
2496
+ export declare enum KnownDocumentIntelligenceLayoutSkillOutputMode {
2497
+ /** Specify the deepest markdown header section to parse. */
2498
+ OneToMany = "oneToMany"
2499
+ }
2500
+
2072
2501
  /** Known values of {@link EntityCategory} that the service accepts. */
2073
2502
  export declare enum KnownEntityCategory {
2074
2503
  /** Entities describing a physical location. */
@@ -2137,6 +2566,14 @@ export declare enum KnownEntityRecognitionSkillLanguage {
2137
2566
  Tr = "tr"
2138
2567
  }
2139
2568
 
2569
+ /** Known values of {@link HybridCountAndFacetMode} that the service accepts. */
2570
+ export declare enum KnownHybridCountAndFacetMode {
2571
+ /** Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. */
2572
+ CountRetrievableResults = "countRetrievableResults",
2573
+ /** Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. */
2574
+ CountAllResults = "countAllResults"
2575
+ }
2576
+
2140
2577
  /** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. */
2141
2578
  export declare enum KnownImageAnalysisSkillLanguage {
2142
2579
  /** Arabic */
@@ -2261,6 +2698,20 @@ export declare enum KnownIndexerExecutionEnvironment {
2261
2698
  Private = "private"
2262
2699
  }
2263
2700
 
2701
+ /** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
2702
+ export declare enum KnownIndexerExecutionStatusDetail {
2703
+ /** Indicates that the reset that occurred was for a call to ResetDocs. */
2704
+ ResetDocs = "resetDocs"
2705
+ }
2706
+
2707
+ /** Known values of {@link IndexingMode} that the service accepts. */
2708
+ export declare enum KnownIndexingMode {
2709
+ /** The indexer is indexing all documents in the datasource. */
2710
+ IndexingAllDocs = "indexingAllDocs",
2711
+ /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
2712
+ IndexingResetDocs = "indexingResetDocs"
2713
+ }
2714
+
2264
2715
  /** Known values of {@link IndexProjectionMode} that the service accepts. */
2265
2716
  export declare enum KnownIndexProjectionMode {
2266
2717
  /** The source document will be skipped from writing into the indexer's target index. */
@@ -2305,6 +2756,236 @@ export declare enum KnownKeyPhraseExtractionSkillLanguage {
2305
2756
  Sv = "sv"
2306
2757
  }
2307
2758
 
2759
+ /** Known values of {@link LexicalAnalyzerName} that the service accepts. */
2760
+ export declare enum KnownLexicalAnalyzerName {
2761
+ /** Microsoft analyzer for Arabic. */
2762
+ ArMicrosoft = "ar.microsoft",
2763
+ /** Lucene analyzer for Arabic. */
2764
+ ArLucene = "ar.lucene",
2765
+ /** Lucene analyzer for Armenian. */
2766
+ HyLucene = "hy.lucene",
2767
+ /** Microsoft analyzer for Bangla. */
2768
+ BnMicrosoft = "bn.microsoft",
2769
+ /** Lucene analyzer for Basque. */
2770
+ EuLucene = "eu.lucene",
2771
+ /** Microsoft analyzer for Bulgarian. */
2772
+ BgMicrosoft = "bg.microsoft",
2773
+ /** Lucene analyzer for Bulgarian. */
2774
+ BgLucene = "bg.lucene",
2775
+ /** Microsoft analyzer for Catalan. */
2776
+ CaMicrosoft = "ca.microsoft",
2777
+ /** Lucene analyzer for Catalan. */
2778
+ CaLucene = "ca.lucene",
2779
+ /** Microsoft analyzer for Chinese (Simplified). */
2780
+ ZhHansMicrosoft = "zh-Hans.microsoft",
2781
+ /** Lucene analyzer for Chinese (Simplified). */
2782
+ ZhHansLucene = "zh-Hans.lucene",
2783
+ /** Microsoft analyzer for Chinese (Traditional). */
2784
+ ZhHantMicrosoft = "zh-Hant.microsoft",
2785
+ /** Lucene analyzer for Chinese (Traditional). */
2786
+ ZhHantLucene = "zh-Hant.lucene",
2787
+ /** Microsoft analyzer for Croatian. */
2788
+ HrMicrosoft = "hr.microsoft",
2789
+ /** Microsoft analyzer for Czech. */
2790
+ CsMicrosoft = "cs.microsoft",
2791
+ /** Lucene analyzer for Czech. */
2792
+ CsLucene = "cs.lucene",
2793
+ /** Microsoft analyzer for Danish. */
2794
+ DaMicrosoft = "da.microsoft",
2795
+ /** Lucene analyzer for Danish. */
2796
+ DaLucene = "da.lucene",
2797
+ /** Microsoft analyzer for Dutch. */
2798
+ NlMicrosoft = "nl.microsoft",
2799
+ /** Lucene analyzer for Dutch. */
2800
+ NlLucene = "nl.lucene",
2801
+ /** Microsoft analyzer for English. */
2802
+ EnMicrosoft = "en.microsoft",
2803
+ /** Lucene analyzer for English. */
2804
+ EnLucene = "en.lucene",
2805
+ /** Microsoft analyzer for Estonian. */
2806
+ EtMicrosoft = "et.microsoft",
2807
+ /** Microsoft analyzer for Finnish. */
2808
+ FiMicrosoft = "fi.microsoft",
2809
+ /** Lucene analyzer for Finnish. */
2810
+ FiLucene = "fi.lucene",
2811
+ /** Microsoft analyzer for French. */
2812
+ FrMicrosoft = "fr.microsoft",
2813
+ /** Lucene analyzer for French. */
2814
+ FrLucene = "fr.lucene",
2815
+ /** Lucene analyzer for Galician. */
2816
+ GlLucene = "gl.lucene",
2817
+ /** Microsoft analyzer for German. */
2818
+ DeMicrosoft = "de.microsoft",
2819
+ /** Lucene analyzer for German. */
2820
+ DeLucene = "de.lucene",
2821
+ /** Microsoft analyzer for Greek. */
2822
+ ElMicrosoft = "el.microsoft",
2823
+ /** Lucene analyzer for Greek. */
2824
+ ElLucene = "el.lucene",
2825
+ /** Microsoft analyzer for Gujarati. */
2826
+ GuMicrosoft = "gu.microsoft",
2827
+ /** Microsoft analyzer for Hebrew. */
2828
+ HeMicrosoft = "he.microsoft",
2829
+ /** Microsoft analyzer for Hindi. */
2830
+ HiMicrosoft = "hi.microsoft",
2831
+ /** Lucene analyzer for Hindi. */
2832
+ HiLucene = "hi.lucene",
2833
+ /** Microsoft analyzer for Hungarian. */
2834
+ HuMicrosoft = "hu.microsoft",
2835
+ /** Lucene analyzer for Hungarian. */
2836
+ HuLucene = "hu.lucene",
2837
+ /** Microsoft analyzer for Icelandic. */
2838
+ IsMicrosoft = "is.microsoft",
2839
+ /** Microsoft analyzer for Indonesian (Bahasa). */
2840
+ IdMicrosoft = "id.microsoft",
2841
+ /** Lucene analyzer for Indonesian. */
2842
+ IdLucene = "id.lucene",
2843
+ /** Lucene analyzer for Irish. */
2844
+ GaLucene = "ga.lucene",
2845
+ /** Microsoft analyzer for Italian. */
2846
+ ItMicrosoft = "it.microsoft",
2847
+ /** Lucene analyzer for Italian. */
2848
+ ItLucene = "it.lucene",
2849
+ /** Microsoft analyzer for Japanese. */
2850
+ JaMicrosoft = "ja.microsoft",
2851
+ /** Lucene analyzer for Japanese. */
2852
+ JaLucene = "ja.lucene",
2853
+ /** Microsoft analyzer for Kannada. */
2854
+ KnMicrosoft = "kn.microsoft",
2855
+ /** Microsoft analyzer for Korean. */
2856
+ KoMicrosoft = "ko.microsoft",
2857
+ /** Lucene analyzer for Korean. */
2858
+ KoLucene = "ko.lucene",
2859
+ /** Microsoft analyzer for Latvian. */
2860
+ LvMicrosoft = "lv.microsoft",
2861
+ /** Lucene analyzer for Latvian. */
2862
+ LvLucene = "lv.lucene",
2863
+ /** Microsoft analyzer for Lithuanian. */
2864
+ LtMicrosoft = "lt.microsoft",
2865
+ /** Microsoft analyzer for Malayalam. */
2866
+ MlMicrosoft = "ml.microsoft",
2867
+ /** Microsoft analyzer for Malay (Latin). */
2868
+ MsMicrosoft = "ms.microsoft",
2869
+ /** Microsoft analyzer for Marathi. */
2870
+ MrMicrosoft = "mr.microsoft",
2871
+ /** Microsoft analyzer for Norwegian (Bokmål). */
2872
+ NbMicrosoft = "nb.microsoft",
2873
+ /** Lucene analyzer for Norwegian. */
2874
+ NoLucene = "no.lucene",
2875
+ /** Lucene analyzer for Persian. */
2876
+ FaLucene = "fa.lucene",
2877
+ /** Microsoft analyzer for Polish. */
2878
+ PlMicrosoft = "pl.microsoft",
2879
+ /** Lucene analyzer for Polish. */
2880
+ PlLucene = "pl.lucene",
2881
+ /** Microsoft analyzer for Portuguese (Brazil). */
2882
+ PtBrMicrosoft = "pt-BR.microsoft",
2883
+ /** Lucene analyzer for Portuguese (Brazil). */
2884
+ PtBrLucene = "pt-BR.lucene",
2885
+ /** Microsoft analyzer for Portuguese (Portugal). */
2886
+ PtPtMicrosoft = "pt-PT.microsoft",
2887
+ /** Lucene analyzer for Portuguese (Portugal). */
2888
+ PtPtLucene = "pt-PT.lucene",
2889
+ /** Microsoft analyzer for Punjabi. */
2890
+ PaMicrosoft = "pa.microsoft",
2891
+ /** Microsoft analyzer for Romanian. */
2892
+ RoMicrosoft = "ro.microsoft",
2893
+ /** Lucene analyzer for Romanian. */
2894
+ RoLucene = "ro.lucene",
2895
+ /** Microsoft analyzer for Russian. */
2896
+ RuMicrosoft = "ru.microsoft",
2897
+ /** Lucene analyzer for Russian. */
2898
+ RuLucene = "ru.lucene",
2899
+ /** Microsoft analyzer for Serbian (Cyrillic). */
2900
+ SrCyrillicMicrosoft = "sr-cyrillic.microsoft",
2901
+ /** Microsoft analyzer for Serbian (Latin). */
2902
+ SrLatinMicrosoft = "sr-latin.microsoft",
2903
+ /** Microsoft analyzer for Slovak. */
2904
+ SkMicrosoft = "sk.microsoft",
2905
+ /** Microsoft analyzer for Slovenian. */
2906
+ SlMicrosoft = "sl.microsoft",
2907
+ /** Microsoft analyzer for Spanish. */
2908
+ EsMicrosoft = "es.microsoft",
2909
+ /** Lucene analyzer for Spanish. */
2910
+ EsLucene = "es.lucene",
2911
+ /** Microsoft analyzer for Swedish. */
2912
+ SvMicrosoft = "sv.microsoft",
2913
+ /** Lucene analyzer for Swedish. */
2914
+ SvLucene = "sv.lucene",
2915
+ /** Microsoft analyzer for Tamil. */
2916
+ TaMicrosoft = "ta.microsoft",
2917
+ /** Microsoft analyzer for Telugu. */
2918
+ TeMicrosoft = "te.microsoft",
2919
+ /** Microsoft analyzer for Thai. */
2920
+ ThMicrosoft = "th.microsoft",
2921
+ /** Lucene analyzer for Thai. */
2922
+ ThLucene = "th.lucene",
2923
+ /** Microsoft analyzer for Turkish. */
2924
+ TrMicrosoft = "tr.microsoft",
2925
+ /** Lucene analyzer for Turkish. */
2926
+ TrLucene = "tr.lucene",
2927
+ /** Microsoft analyzer for Ukrainian. */
2928
+ UkMicrosoft = "uk.microsoft",
2929
+ /** Microsoft analyzer for Urdu. */
2930
+ UrMicrosoft = "ur.microsoft",
2931
+ /** Microsoft analyzer for Vietnamese. */
2932
+ ViMicrosoft = "vi.microsoft",
2933
+ /** Standard Lucene analyzer. */
2934
+ StandardLucene = "standard.lucene",
2935
+ /** Standard ASCII Folding Lucene analyzer. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
2936
+ StandardAsciiFoldingLucene = "standardasciifolding.lucene",
2937
+ /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
2938
+ Keyword = "keyword",
2939
+ /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
2940
+ Pattern = "pattern",
2941
+ /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
2942
+ Simple = "simple",
2943
+ /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
2944
+ Stop = "stop",
2945
+ /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
2946
+ Whitespace = "whitespace"
2947
+ }
2948
+
2949
+ /** Known values of {@link LexicalNormalizerName} that the service accepts. */
2950
+ declare enum KnownLexicalNormalizerName {
2951
+ /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
2952
+ AsciiFolding = "asciifolding",
2953
+ /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
2954
+ Elision = "elision",
2955
+ /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
2956
+ Lowercase = "lowercase",
2957
+ /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
2958
+ Standard = "standard",
2959
+ /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
2960
+ Uppercase = "uppercase"
2961
+ }
2962
+ export { KnownLexicalNormalizerName }
2963
+ export { KnownLexicalNormalizerName as KnownNormalizerNames }
2964
+
2965
+ /** Known values of {@link MarkdownHeaderDepth} that the service accepts. */
2966
+ export declare enum KnownMarkdownHeaderDepth {
2967
+ /** Indicates that headers up to a level of h1 will be considered while grouping markdown content. */
2968
+ H1 = "h1",
2969
+ /** Indicates that headers up to a level of h2 will be considered while grouping markdown content. */
2970
+ H2 = "h2",
2971
+ /** Indicates that headers up to a level of h3 will be considered while grouping markdown content. */
2972
+ H3 = "h3",
2973
+ /** Indicates that headers up to a level of h4 will be considered while grouping markdown content. */
2974
+ H4 = "h4",
2975
+ /** Indicates that headers up to a level of h5 will be considered while grouping markdown content. */
2976
+ H5 = "h5",
2977
+ /** Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. */
2978
+ H6 = "h6"
2979
+ }
2980
+
2981
+ /** Known values of {@link MarkdownParsingSubmode} that the service accepts. */
2982
+ export declare enum KnownMarkdownParsingSubmode {
2983
+ /** Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. */
2984
+ OneToMany = "oneToMany",
2985
+ /** Indicates that each markdown file will be parsed into a single search document. */
2986
+ OneToOne = "oneToOne"
2987
+ }
2988
+
2308
2989
  /** Known values of {@link OcrLineEnding} that the service accepts. */
2309
2990
  export declare enum KnownOcrLineEnding {
2310
2991
  /** Lines are separated by a single space character. */
@@ -2669,6 +3350,176 @@ export declare enum KnownPIIDetectionSkillMaskingMode {
2669
3350
  Replace = "replace"
2670
3351
  }
2671
3352
 
3353
+ /** Known values of {@link QueryDebugMode} that the service accepts. */
3354
+ export declare enum KnownQueryDebugMode {
3355
+ /** No query debugging information will be returned. */
3356
+ Disabled = "disabled",
3357
+ /** Allows the user to further explore their reranked results. */
3358
+ Semantic = "semantic",
3359
+ /** Allows the user to further explore their hybrid and vector query results. */
3360
+ Vector = "vector",
3361
+ /** Allows the user to explore the list of query rewrites generated for their search request. */
3362
+ QueryRewrites = "queryRewrites",
3363
+ /** Turn on all debug options. */
3364
+ All = "all"
3365
+ }
3366
+
3367
+ /** Known values of {@link QueryLanguage} that the service accepts. */
3368
+ export declare enum KnownQueryLanguage {
3369
+ /** Query language not specified. */
3370
+ None = "none",
3371
+ /** Query language value for English (United States). */
3372
+ EnUs = "en-us",
3373
+ /** Query language value for English (Great Britain). */
3374
+ EnGb = "en-gb",
3375
+ /** Query language value for English (India). */
3376
+ EnIn = "en-in",
3377
+ /** Query language value for English (Canada). */
3378
+ EnCa = "en-ca",
3379
+ /** Query language value for English (Australia). */
3380
+ EnAu = "en-au",
3381
+ /** Query language value for French (France). */
3382
+ FrFr = "fr-fr",
3383
+ /** Query language value for French (Canada). */
3384
+ FrCa = "fr-ca",
3385
+ /** Query language value for German (Germany). */
3386
+ DeDe = "de-de",
3387
+ /** Query language value for Spanish (Spain). */
3388
+ EsEs = "es-es",
3389
+ /** Query language value for Spanish (Mexico). */
3390
+ EsMx = "es-mx",
3391
+ /** Query language value for Chinese (China). */
3392
+ ZhCn = "zh-cn",
3393
+ /** Query language value for Chinese (Taiwan). */
3394
+ ZhTw = "zh-tw",
3395
+ /** Query language value for Portuguese (Brazil). */
3396
+ PtBr = "pt-br",
3397
+ /** Query language value for Portuguese (Portugal). */
3398
+ PtPt = "pt-pt",
3399
+ /** Query language value for Italian (Italy). */
3400
+ ItIt = "it-it",
3401
+ /** Query language value for Japanese (Japan). */
3402
+ JaJp = "ja-jp",
3403
+ /** Query language value for Korean (Korea). */
3404
+ KoKr = "ko-kr",
3405
+ /** Query language value for Russian (Russia). */
3406
+ RuRu = "ru-ru",
3407
+ /** Query language value for Czech (Czech Republic). */
3408
+ CsCz = "cs-cz",
3409
+ /** Query language value for Dutch (Belgium). */
3410
+ NlBe = "nl-be",
3411
+ /** Query language value for Dutch (Netherlands). */
3412
+ NlNl = "nl-nl",
3413
+ /** Query language value for Hungarian (Hungary). */
3414
+ HuHu = "hu-hu",
3415
+ /** Query language value for Polish (Poland). */
3416
+ PlPl = "pl-pl",
3417
+ /** Query language value for Swedish (Sweden). */
3418
+ SvSe = "sv-se",
3419
+ /** Query language value for Turkish (Turkey). */
3420
+ TrTr = "tr-tr",
3421
+ /** Query language value for Hindi (India). */
3422
+ HiIn = "hi-in",
3423
+ /** Query language value for Arabic (Saudi Arabia). */
3424
+ ArSa = "ar-sa",
3425
+ /** Query language value for Arabic (Egypt). */
3426
+ ArEg = "ar-eg",
3427
+ /** Query language value for Arabic (Morocco). */
3428
+ ArMa = "ar-ma",
3429
+ /** Query language value for Arabic (Kuwait). */
3430
+ ArKw = "ar-kw",
3431
+ /** Query language value for Arabic (Jordan). */
3432
+ ArJo = "ar-jo",
3433
+ /** Query language value for Danish (Denmark). */
3434
+ DaDk = "da-dk",
3435
+ /** Query language value for Norwegian (Norway). */
3436
+ NoNo = "no-no",
3437
+ /** Query language value for Bulgarian (Bulgaria). */
3438
+ BgBg = "bg-bg",
3439
+ /** Query language value for Croatian (Croatia). */
3440
+ HrHr = "hr-hr",
3441
+ /** Query language value for Croatian (Bosnia and Herzegovina). */
3442
+ HrBa = "hr-ba",
3443
+ /** Query language value for Malay (Malaysia). */
3444
+ MsMy = "ms-my",
3445
+ /** Query language value for Malay (Brunei Darussalam). */
3446
+ MsBn = "ms-bn",
3447
+ /** Query language value for Slovenian (Slovenia). */
3448
+ SlSl = "sl-sl",
3449
+ /** Query language value for Tamil (India). */
3450
+ TaIn = "ta-in",
3451
+ /** Query language value for Vietnamese (Viet Nam). */
3452
+ ViVn = "vi-vn",
3453
+ /** Query language value for Greek (Greece). */
3454
+ ElGr = "el-gr",
3455
+ /** Query language value for Romanian (Romania). */
3456
+ RoRo = "ro-ro",
3457
+ /** Query language value for Icelandic (Iceland). */
3458
+ IsIs = "is-is",
3459
+ /** Query language value for Indonesian (Indonesia). */
3460
+ IdId = "id-id",
3461
+ /** Query language value for Thai (Thailand). */
3462
+ ThTh = "th-th",
3463
+ /** Query language value for Lithuanian (Lithuania). */
3464
+ LtLt = "lt-lt",
3465
+ /** Query language value for Ukrainian (Ukraine). */
3466
+ UkUa = "uk-ua",
3467
+ /** Query language value for Latvian (Latvia). */
3468
+ LvLv = "lv-lv",
3469
+ /** Query language value for Estonian (Estonia). */
3470
+ EtEe = "et-ee",
3471
+ /** Query language value for Catalan. */
3472
+ CaEs = "ca-es",
3473
+ /** Query language value for Finnish (Finland). */
3474
+ FiFi = "fi-fi",
3475
+ /** Query language value for Serbian (Bosnia and Herzegovina). */
3476
+ SrBa = "sr-ba",
3477
+ /** Query language value for Serbian (Montenegro). */
3478
+ SrMe = "sr-me",
3479
+ /** Query language value for Serbian (Serbia). */
3480
+ SrRs = "sr-rs",
3481
+ /** Query language value for Slovak (Slovakia). */
3482
+ SkSk = "sk-sk",
3483
+ /** Query language value for Norwegian (Norway). */
3484
+ NbNo = "nb-no",
3485
+ /** Query language value for Armenian (Armenia). */
3486
+ HyAm = "hy-am",
3487
+ /** Query language value for Bengali (India). */
3488
+ BnIn = "bn-in",
3489
+ /** Query language value for Basque. */
3490
+ EuEs = "eu-es",
3491
+ /** Query language value for Galician. */
3492
+ GlEs = "gl-es",
3493
+ /** Query language value for Gujarati (India). */
3494
+ GuIn = "gu-in",
3495
+ /** Query language value for Hebrew (Israel). */
3496
+ HeIl = "he-il",
3497
+ /** Query language value for Irish (Ireland). */
3498
+ GaIe = "ga-ie",
3499
+ /** Query language value for Kannada (India). */
3500
+ KnIn = "kn-in",
3501
+ /** Query language value for Malayalam (India). */
3502
+ MlIn = "ml-in",
3503
+ /** Query language value for Marathi (India). */
3504
+ MrIn = "mr-in",
3505
+ /** Query language value for Persian (U.A.E.). */
3506
+ FaAe = "fa-ae",
3507
+ /** Query language value for Punjabi (India). */
3508
+ PaIn = "pa-in",
3509
+ /** Query language value for Telugu (India). */
3510
+ TeIn = "te-in",
3511
+ /** Query language value for Urdu (Pakistan). */
3512
+ UrPk = "ur-pk"
3513
+ }
3514
+
3515
+ /** Known values of {@link QuerySpellerType} that the service accepts. */
3516
+ export declare enum KnownQuerySpeller {
3517
+ /** Speller not enabled. */
3518
+ None = "none",
3519
+ /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
3520
+ Lexicon = "lexicon"
3521
+ }
3522
+
2672
3523
  /** Known values of {@link RegexFlags} that the service accepts. */
2673
3524
  export declare enum KnownRegexFlags {
2674
3525
  /** Enables canonical equivalence. */
@@ -2750,7 +3601,9 @@ export declare enum KnownSearchIndexerDataSourceType {
2750
3601
  /** Indicates a MySql datasource. */
2751
3602
  MySql = "mysql",
2752
3603
  /** Indicates an ADLS Gen2 datasource. */
2753
- AdlsGen2 = "adlsgen2"
3604
+ AdlsGen2 = "adlsgen2",
3605
+ /** Indicates a Microsoft Fabric OneLake datasource. */
3606
+ OneLake = "onelake"
2754
3607
  }
2755
3608
 
2756
3609
  /** Known values of {@link SemanticErrorMode} that the service accepts. */
@@ -2771,6 +3624,22 @@ export declare enum KnownSemanticErrorReason {
2771
3624
  Transient = "transient"
2772
3625
  }
2773
3626
 
3627
+ /** Known values of {@link SemanticFieldState} that the service accepts. */
3628
+ export declare enum KnownSemanticFieldState {
3629
+ /** The field was fully used for semantic enrichment. */
3630
+ Used = "used",
3631
+ /** The field was not used for semantic enrichment. */
3632
+ Unused = "unused",
3633
+ /** The field was partially used for semantic enrichment. */
3634
+ Partial = "partial"
3635
+ }
3636
+
3637
+ /** Known values of {@link SemanticQueryRewritesResultType} that the service accepts. */
3638
+ export declare enum KnownSemanticQueryRewritesResultType {
3639
+ /** Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results. */
3640
+ OriginalQueryOnly = "originalQueryOnly"
3641
+ }
3642
+
2774
3643
  /** Known values of {@link SemanticSearchResultsType} that the service accepts. */
2775
3644
  export declare enum KnownSemanticSearchResultsType {
2776
3645
  /** Results without any semantic enrichment or reranking. */
@@ -2813,6 +3682,18 @@ export declare enum KnownSentimentSkillLanguage {
2813
3682
  Tr = "tr"
2814
3683
  }
2815
3684
 
3685
+ /** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */
3686
+ export declare enum KnownSplitSkillEncoderModelName {
3687
+ /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
3688
+ R50KBase = "r50k_base",
3689
+ /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
3690
+ P50KBase = "p50k_base",
3691
+ /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
3692
+ P50KEdit = "p50k_edit",
3693
+ /** A base model with a 100,000 token vocabulary. */
3694
+ CL100KBase = "cl100k_base"
3695
+ }
3696
+
2816
3697
  /** Known values of {@link SplitSkillLanguage} that the service accepts. */
2817
3698
  export declare enum KnownSplitSkillLanguage {
2818
3699
  /** Amharic */
@@ -2883,6 +3764,14 @@ export declare enum KnownSplitSkillLanguage {
2883
3764
  Zh = "zh"
2884
3765
  }
2885
3766
 
3767
+ /** Known values of {@link SplitSkillUnit} that the service accepts. */
3768
+ export declare enum KnownSplitSkillUnit {
3769
+ /** The length will be measured by character. */
3770
+ Characters = "characters",
3771
+ /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */
3772
+ AzureOpenAITokens = "azureOpenAITokens"
3773
+ }
3774
+
2886
3775
  /** Known values of {@link TextSplitMode} that the service accepts. */
2887
3776
  export declare enum KnownTextSplitMode {
2888
3777
  /** Split the text into individual pages. */
@@ -3039,105 +3928,257 @@ export declare enum KnownTextTranslationSkillLanguage {
3039
3928
  Pa = "pa"
3040
3929
  }
3041
3930
 
3042
- /** Known values of {@link TokenFilterName} that the service accepts. */
3931
+ /**
3932
+ * Defines values for TokenFilterName.
3933
+ * @readonly
3934
+ */
3043
3935
  export declare enum KnownTokenFilterNames {
3044
- /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
3936
+ /**
3937
+ * A token filter that applies the Arabic normalizer to normalize the orthography. See
3938
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html
3939
+ */
3045
3940
  ArabicNormalization = "arabic_normalization",
3046
- /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
3941
+ /**
3942
+ * Strips all characters after an apostrophe (including the apostrophe itself). See
3943
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html
3944
+ */
3047
3945
  Apostrophe = "apostrophe",
3048
- /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
3946
+ /**
3947
+ * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127
3948
+ * ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such
3949
+ * equivalents exist. See
3950
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html
3951
+ */
3049
3952
  AsciiFolding = "asciifolding",
3050
- /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
3953
+ /**
3954
+ * Forms bigrams of CJK terms that are generated from StandardTokenizer. See
3955
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html
3956
+ */
3051
3957
  CjkBigram = "cjk_bigram",
3052
- /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
3958
+ /**
3959
+ * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic
3960
+ * Latin, and half-width Katakana variants into the equivalent Kana. See
3961
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html
3962
+ */
3053
3963
  CjkWidth = "cjk_width",
3054
- /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
3964
+ /**
3965
+ * Removes English possessives, and dots from acronyms. See
3966
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html
3967
+ */
3055
3968
  Classic = "classic",
3056
- /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
3969
+ /**
3970
+ * Construct bigrams for frequently occurring terms while indexing. Single terms are still
3971
+ * indexed too, with bigrams overlaid. See
3972
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html
3973
+ */
3057
3974
  CommonGram = "common_grams",
3058
- /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
3975
+ /**
3976
+ * Generates n-grams of the given size(s) starting from the front or the back of an input token.
3977
+ * See
3978
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html
3979
+ */
3059
3980
  EdgeNGram = "edgeNGram_v2",
3060
- /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
3981
+ /**
3982
+ * Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See
3983
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html
3984
+ */
3061
3985
  Elision = "elision",
3062
- /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
3986
+ /**
3987
+ * Normalizes German characters according to the heuristics of the German2 snowball algorithm.
3988
+ * See
3989
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html
3990
+ */
3063
3991
  GermanNormalization = "german_normalization",
3064
- /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
3992
+ /**
3993
+ * Normalizes text in Hindi to remove some differences in spelling variations. See
3994
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html
3995
+ */
3065
3996
  HindiNormalization = "hindi_normalization",
3066
- /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
3997
+ /**
3998
+ * Normalizes the Unicode representation of text in Indian languages. See
3999
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html
4000
+ */
3067
4001
  IndicNormalization = "indic_normalization",
3068
- /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
4002
+ /**
4003
+ * Emits each incoming token twice, once as keyword and once as non-keyword. See
4004
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html
4005
+ */
3069
4006
  KeywordRepeat = "keyword_repeat",
3070
- /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
4007
+ /**
4008
+ * A high-performance kstem filter for English. See
4009
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html
4010
+ */
3071
4011
  KStem = "kstem",
3072
- /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
4012
+ /**
4013
+ * Removes words that are too long or too short. See
4014
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html
4015
+ */
3073
4016
  Length = "length",
3074
- /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
4017
+ /**
4018
+ * Limits the number of tokens while indexing. See
4019
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html
4020
+ */
3075
4021
  Limit = "limit",
3076
- /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
4022
+ /**
4023
+ * Normalizes token text to lower case. See
4024
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm
4025
+ */
3077
4026
  Lowercase = "lowercase",
3078
- /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
4027
+ /**
4028
+ * Generates n-grams of the given size(s). See
4029
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html
4030
+ */
3079
4031
  NGram = "nGram_v2",
3080
- /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
4032
+ /**
4033
+ * Applies normalization for Persian. See
4034
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html
4035
+ */
3081
4036
  PersianNormalization = "persian_normalization",
3082
- /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
4037
+ /**
4038
+ * Create tokens for phonetic matches. See
4039
+ * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html
4040
+ */
3083
4041
  Phonetic = "phonetic",
3084
- /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
4042
+ /**
4043
+ * Uses the Porter stemming algorithm to transform the token stream. See
4044
+ * http://tartarus.org/~martin/PorterStemmer
4045
+ */
3085
4046
  PorterStem = "porter_stem",
3086
- /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
4047
+ /**
4048
+ * Reverses the token string. See
4049
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html
4050
+ */
3087
4051
  Reverse = "reverse",
3088
- /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
4052
+ /**
4053
+ * Normalizes use of the interchangeable Scandinavian characters. See
4054
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html
4055
+ */
3089
4056
  ScandinavianNormalization = "scandinavian_normalization",
3090
- /** Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
4057
+ /**
4058
+ * Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use
4059
+ * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See
4060
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html
4061
+ */
3091
4062
  ScandinavianFoldingNormalization = "scandinavian_folding",
3092
- /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
4063
+ /**
4064
+ * Creates combinations of tokens as a single token. See
4065
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html
4066
+ */
3093
4067
  Shingle = "shingle",
3094
- /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
4068
+ /**
4069
+ * A filter that stems words using a Snowball-generated stemmer. See
4070
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html
4071
+ */
3095
4072
  Snowball = "snowball",
3096
- /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
4073
+ /**
4074
+ * Normalizes the Unicode representation of Sorani text. See
4075
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html
4076
+ */
3097
4077
  SoraniNormalization = "sorani_normalization",
3098
- /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
4078
+ /**
4079
+ * Language specific stemming filter. See
4080
+ * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters
4081
+ */
3099
4082
  Stemmer = "stemmer",
3100
- /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
4083
+ /**
4084
+ * Removes stop words from a token stream. See
4085
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html
4086
+ */
3101
4087
  Stopwords = "stopwords",
3102
- /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
4088
+ /**
4089
+ * Trims leading and trailing whitespace from tokens. See
4090
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html
4091
+ */
3103
4092
  Trim = "trim",
3104
- /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
4093
+ /**
4094
+ * Truncates the terms to a specific length. See
4095
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html
4096
+ */
3105
4097
  Truncate = "truncate",
3106
- /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
4098
+ /**
4099
+ * Filters out tokens with same text as the previous token. See
4100
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html
4101
+ */
3107
4102
  Unique = "unique",
3108
- /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
4103
+ /**
4104
+ * Normalizes token text to upper case. See
4105
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html
4106
+ */
3109
4107
  Uppercase = "uppercase",
3110
- /** Splits words into subwords and performs optional transformations on subword groups. */
4108
+ /**
4109
+ * Splits words into subwords and performs optional transformations on subword groups.
4110
+ */
3111
4111
  WordDelimiter = "word_delimiter"
3112
4112
  }
3113
4113
 
3114
- /** Known values of {@link LexicalTokenizerName} that the service accepts. */
4114
+ /**
4115
+ * Defines values for TokenizerName.
4116
+ * @readonly
4117
+ */
3115
4118
  export declare enum KnownTokenizerNames {
3116
- /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
4119
+ /**
4120
+ * Grammar-based tokenizer that is suitable for processing most European-language documents. See
4121
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html
4122
+ */
3117
4123
  Classic = "classic",
3118
- /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
4124
+ /**
4125
+ * Tokenizes the input from an edge into n-grams of the given size(s). See
4126
+ * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html
4127
+ */
3119
4128
  EdgeNGram = "edgeNGram",
3120
- /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
4129
+ /**
4130
+ * Emits the entire input as a single token. See
4131
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html
4132
+ */
3121
4133
  Keyword = "keyword_v2",
3122
- /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
4134
+ /**
4135
+ * Divides text at non-letters. See
4136
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html
4137
+ */
3123
4138
  Letter = "letter",
3124
- /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
4139
+ /**
4140
+ * Divides text at non-letters and converts them to lower case. See
4141
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html
4142
+ */
3125
4143
  Lowercase = "lowercase",
3126
- /** Divides text using language-specific rules. */
4144
+ /**
4145
+ * Divides text using language-specific rules.
4146
+ */
3127
4147
  MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
3128
- /** Divides text using language-specific rules and reduces words to their base forms. */
4148
+ /**
4149
+ * Divides text using language-specific rules and reduces words to their base forms.
4150
+ */
3129
4151
  MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
3130
- /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
4152
+ /**
4153
+ * Tokenizes the input into n-grams of the given size(s). See
4154
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html
4155
+ */
3131
4156
  NGram = "nGram",
3132
- /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
4157
+ /**
4158
+ * Tokenizer for path-like hierarchies. See
4159
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html
4160
+ */
3133
4161
  PathHierarchy = "path_hierarchy_v2",
3134
- /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
4162
+ /**
4163
+ * Tokenizer that uses regex pattern matching to construct distinct tokens. See
4164
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html
4165
+ */
3135
4166
  Pattern = "pattern",
3136
- /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
4167
+ /**
4168
+ * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop
4169
+ * filter. See
4170
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html
4171
+ */
3137
4172
  Standard = "standard_v2",
3138
- /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
4173
+ /**
4174
+ * Tokenizes urls and emails as one token. See
4175
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html
4176
+ */
3139
4177
  UaxUrlEmail = "uax_url_email",
3140
- /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
4178
+ /**
4179
+ * Divides text at whitespace. See
4180
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html
4181
+ */
3141
4182
  Whitespace = "whitespace"
3142
4183
  }
3143
4184
 
@@ -3160,7 +4201,11 @@ export declare enum KnownVectorQueryKind {
3160
4201
  /** Vector query where a raw vector value is provided. */
3161
4202
  Vector = "vector",
3162
4203
  /** Vector query where a text value that needs to be vectorized is provided. */
3163
- Text = "text"
4204
+ Text = "text",
4205
+ /** Vector query where an url that represents an image value that needs to be vectorized is provided. */
4206
+ ImageUrl = "imageUrl",
4207
+ /** Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided. */
4208
+ ImageBinary = "imageBinary"
3164
4209
  }
3165
4210
 
3166
4211
  /** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */
@@ -3191,6 +4236,14 @@ export declare enum KnownVectorSearchCompressionKind {
3191
4236
  BinaryQuantization = "binaryQuantization"
3192
4237
  }
3193
4238
 
4239
+ /** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */
4240
+ export declare enum KnownVectorSearchCompressionRescoreStorageMethod {
4241
+ /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */
4242
+ PreserveOriginals = "preserveOriginals",
4243
+ /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */
4244
+ DiscardOriginals = "discardOriginals"
4245
+ }
4246
+
3194
4247
  /** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */
3195
4248
  export declare enum KnownVectorSearchCompressionTarget {
3196
4249
  /** Int8 */
@@ -3202,7 +4255,19 @@ export declare enum KnownVectorSearchVectorizerKind {
3202
4255
  /** Generate embeddings using an Azure OpenAI resource at query time. */
3203
4256
  AzureOpenAI = "azureOpenAI",
3204
4257
  /** Generate embeddings using a custom web endpoint at query time. */
3205
- CustomWebApi = "customWebApi"
4258
+ CustomWebApi = "customWebApi",
4259
+ /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */
4260
+ AIServicesVision = "aiServicesVision",
4261
+ /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog at query time. */
4262
+ AML = "aml"
4263
+ }
4264
+
4265
+ /** Known values of {@link VectorThresholdKind} that the service accepts. */
4266
+ export declare enum KnownVectorThresholdKind {
4267
+ /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
4268
+ VectorSimilarity = "vectorSimilarity",
4269
+ /** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
4270
+ SearchScore = "searchScore"
3206
4271
  }
3207
4272
 
3208
4273
  /** Known values of {@link VisualFeature} that the service accepts. */
@@ -3349,6 +4414,24 @@ export declare type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneS
3349
4414
  */
3350
4415
  export declare type LexicalAnalyzerName = string;
3351
4416
 
4417
+ /**
4418
+ * Contains the possible cases for LexicalNormalizer.
4419
+ */
4420
+ export declare type LexicalNormalizer = CustomNormalizer;
4421
+
4422
+ /**
4423
+ * Defines values for LexicalNormalizerName. \
4424
+ * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,
4425
+ * this enum contains the known values that the service supports.
4426
+ * ### Known values supported by the service
4427
+ * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \
4428
+ * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \
4429
+ * **lowercase**: Normalizes token text to lowercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
4430
+ * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \
4431
+ * **uppercase**: Normalizes token text to uppercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html
4432
+ */
4433
+ export declare type LexicalNormalizerName = string;
4434
+
3352
4435
  /**
3353
4436
  * Contains the possible cases for Tokenizer.
3354
4437
  */
@@ -3385,6 +4468,11 @@ export declare interface LimitTokenFilter extends BaseTokenFilter {
3385
4468
  consumeAllTokens?: boolean;
3386
4469
  }
3387
4470
 
4471
+ /**
4472
+ * Options for list aliases operation.
4473
+ */
4474
+ export declare type ListAliasesOptions = OperationOptions;
4475
+
3388
4476
  /**
3389
4477
  * Options for a list data sources operation.
3390
4478
  */
@@ -3479,6 +4567,30 @@ export declare interface MappingCharFilter extends BaseCharFilter {
3479
4567
  mappings: string[];
3480
4568
  }
3481
4569
 
4570
+ /**
4571
+ * Defines values for MarkdownHeaderDepth. \
4572
+ * {@link KnownMarkdownHeaderDepth} can be used interchangeably with MarkdownHeaderDepth,
4573
+ * this enum contains the known values that the service supports.
4574
+ * ### Known values supported by the service
4575
+ * **h1**: Indicates that headers up to a level of h1 will be considered while grouping markdown content. \
4576
+ * **h2**: Indicates that headers up to a level of h2 will be considered while grouping markdown content. \
4577
+ * **h3**: Indicates that headers up to a level of h3 will be considered while grouping markdown content. \
4578
+ * **h4**: Indicates that headers up to a level of h4 will be considered while grouping markdown content. \
4579
+ * **h5**: Indicates that headers up to a level of h5 will be considered while grouping markdown content. \
4580
+ * **h6**: Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default.
4581
+ */
4582
+ export declare type MarkdownHeaderDepth = string;
4583
+
4584
+ /**
4585
+ * Defines values for MarkdownParsingSubmode. \
4586
+ * {@link KnownMarkdownParsingSubmode} can be used interchangeably with MarkdownParsingSubmode,
4587
+ * this enum contains the known values that the service supports.
4588
+ * ### Known values supported by the service
4589
+ * **oneToMany**: Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. \
4590
+ * **oneToOne**: Indicates that each markdown file will be parsed into a single search document.
4591
+ */
4592
+ export declare type MarkdownParsingSubmode = string;
4593
+
3482
4594
  /**
3483
4595
  * Options for the merge documents operation.
3484
4596
  */
@@ -3534,6 +4646,12 @@ export declare type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catal
3534
4646
  */
3535
4647
  export declare type NarrowedModel<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends unknown ? true : false ? TModel : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? never : (<T>() => T extends TFields ? true : false) extends <T>() => T extends SelectFields<TModel> ? true : false ? TModel : SearchPick<TModel, TFields>;
3536
4648
 
4649
+ /** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
4650
+ export declare interface NativeBlobSoftDeleteDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy {
4651
+ /** Polymorphic discriminator, which specifies the different types this object can be */
4652
+ odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
4653
+ }
4654
+
3537
4655
  /**
3538
4656
  * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.
3539
4657
  */
@@ -3571,6 +4689,16 @@ export declare interface NGramTokenizer extends BaseLexicalTokenizer {
3571
4689
  tokenChars?: TokenCharacterKind[];
3572
4690
  }
3573
4691
 
4692
+ /**
4693
+ * Specifies the properties for connecting to an AML vectorizer with no authentication.
4694
+ */
4695
+ export declare interface NoAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
4696
+ /** Indicates how the service should attempt to identify itself to the AML instance */
4697
+ authKind: "none";
4698
+ /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
4699
+ scoringUri: string;
4700
+ }
4701
+
3574
4702
  /**
3575
4703
  * Defines values for OcrLineEnding. \
3576
4704
  * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,
@@ -3587,10 +4715,12 @@ export declare type OcrLineEnding = string;
3587
4715
  export declare interface OcrSkill extends BaseSearchIndexerSkill {
3588
4716
  /** Polymorphic discriminator, which specifies the different types this object can be */
3589
4717
  odatatype: "#Microsoft.Skills.Vision.OcrSkill";
3590
- /** A value indicating which language code to use. Default is en. */
4718
+ /** A value indicating which language code to use. Default is `en`. */
3591
4719
  defaultLanguageCode?: OcrSkillLanguage;
3592
4720
  /** A value indicating to turn orientation detection on or not. Default is false. */
3593
4721
  shouldDetectOrientation?: boolean;
4722
+ /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */
4723
+ lineEnding?: OcrLineEnding;
3594
4724
  }
3595
4725
 
3596
4726
  export declare type OcrSkillLanguage = `${KnownOcrSkillLanguage}`;
@@ -3822,16 +4952,234 @@ export declare interface QueryCaptionResult {
3822
4952
  readonly highlights?: string;
3823
4953
  }
3824
4954
 
4955
+ /**
4956
+ * Defines values for QueryDebugMode. \
4957
+ * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode,
4958
+ * this enum contains the known values that the service supports.
4959
+ * ### Known values supported by the service
4960
+ * **disabled**: No query debugging information will be returned. \
4961
+ * **semantic**: Allows the user to further explore their reranked results. \
4962
+ * **vector**: Allows the user to further explore their hybrid and vector query results. \
4963
+ * **queryRewrites**: Allows the user to explore the list of query rewrites generated for their search request. \
4964
+ * **all**: Turn on all debug options.
4965
+ */
4966
+ export declare type QueryDebugMode = string;
4967
+
4968
+ /**
4969
+ * Defines values for QueryLanguage. \
4970
+ * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage,
4971
+ * this enum contains the known values that the service supports.
4972
+ * ### Known values supported by the service
4973
+ * **none**: Query language not specified. \
4974
+ * **en-us**: Query language value for English (United States). \
4975
+ * **en-gb**: Query language value for English (Great Britain). \
4976
+ * **en-in**: Query language value for English (India). \
4977
+ * **en-ca**: Query language value for English (Canada). \
4978
+ * **en-au**: Query language value for English (Australia). \
4979
+ * **fr-fr**: Query language value for French (France). \
4980
+ * **fr-ca**: Query language value for French (Canada). \
4981
+ * **de-de**: Query language value for German (Germany). \
4982
+ * **es-es**: Query language value for Spanish (Spain). \
4983
+ * **es-mx**: Query language value for Spanish (Mexico). \
4984
+ * **zh-cn**: Query language value for Chinese (China). \
4985
+ * **zh-tw**: Query language value for Chinese (Taiwan). \
4986
+ * **pt-br**: Query language value for Portuguese (Brazil). \
4987
+ * **pt-pt**: Query language value for Portuguese (Portugal). \
4988
+ * **it-it**: Query language value for Italian (Italy). \
4989
+ * **ja-jp**: Query language value for Japanese (Japan). \
4990
+ * **ko-kr**: Query language value for Korean (Korea). \
4991
+ * **ru-ru**: Query language value for Russian (Russia). \
4992
+ * **cs-cz**: Query language value for Czech (Czech Republic). \
4993
+ * **nl-be**: Query language value for Dutch (Belgium). \
4994
+ * **nl-nl**: Query language value for Dutch (Netherlands). \
4995
+ * **hu-hu**: Query language value for Hungarian (Hungary). \
4996
+ * **pl-pl**: Query language value for Polish (Poland). \
4997
+ * **sv-se**: Query language value for Swedish (Sweden). \
4998
+ * **tr-tr**: Query language value for Turkish (Turkey). \
4999
+ * **hi-in**: Query language value for Hindi (India). \
5000
+ * **ar-sa**: Query language value for Arabic (Saudi Arabia). \
5001
+ * **ar-eg**: Query language value for Arabic (Egypt). \
5002
+ * **ar-ma**: Query language value for Arabic (Morocco). \
5003
+ * **ar-kw**: Query language value for Arabic (Kuwait). \
5004
+ * **ar-jo**: Query language value for Arabic (Jordan). \
5005
+ * **da-dk**: Query language value for Danish (Denmark). \
5006
+ * **no-no**: Query language value for Norwegian (Norway). \
5007
+ * **bg-bg**: Query language value for Bulgarian (Bulgaria). \
5008
+ * **hr-hr**: Query language value for Croatian (Croatia). \
5009
+ * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \
5010
+ * **ms-my**: Query language value for Malay (Malaysia). \
5011
+ * **ms-bn**: Query language value for Malay (Brunei Darussalam). \
5012
+ * **sl-sl**: Query language value for Slovenian (Slovenia). \
5013
+ * **ta-in**: Query language value for Tamil (India). \
5014
+ * **vi-vn**: Query language value for Vietnamese (Viet Nam). \
5015
+ * **el-gr**: Query language value for Greek (Greece). \
5016
+ * **ro-ro**: Query language value for Romanian (Romania). \
5017
+ * **is-is**: Query language value for Icelandic (Iceland). \
5018
+ * **id-id**: Query language value for Indonesian (Indonesia). \
5019
+ * **th-th**: Query language value for Thai (Thailand). \
5020
+ * **lt-lt**: Query language value for Lithuanian (Lithuania). \
5021
+ * **uk-ua**: Query language value for Ukrainian (Ukraine). \
5022
+ * **lv-lv**: Query language value for Latvian (Latvia). \
5023
+ * **et-ee**: Query language value for Estonian (Estonia). \
5024
+ * **ca-es**: Query language value for Catalan. \
5025
+ * **fi-fi**: Query language value for Finnish (Finland). \
5026
+ * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \
5027
+ * **sr-me**: Query language value for Serbian (Montenegro). \
5028
+ * **sr-rs**: Query language value for Serbian (Serbia). \
5029
+ * **sk-sk**: Query language value for Slovak (Slovakia). \
5030
+ * **nb-no**: Query language value for Norwegian (Norway). \
5031
+ * **hy-am**: Query language value for Armenian (Armenia). \
5032
+ * **bn-in**: Query language value for Bengali (India). \
5033
+ * **eu-es**: Query language value for Basque. \
5034
+ * **gl-es**: Query language value for Galician. \
5035
+ * **gu-in**: Query language value for Gujarati (India). \
5036
+ * **he-il**: Query language value for Hebrew (Israel). \
5037
+ * **ga-ie**: Query language value for Irish (Ireland). \
5038
+ * **kn-in**: Query language value for Kannada (India). \
5039
+ * **ml-in**: Query language value for Malayalam (India). \
5040
+ * **mr-in**: Query language value for Marathi (India). \
5041
+ * **fa-ae**: Query language value for Persian (U.A.E.). \
5042
+ * **pa-in**: Query language value for Punjabi (India). \
5043
+ * **te-in**: Query language value for Telugu (India). \
5044
+ * **ur-pk**: Query language value for Urdu (Pakistan).
5045
+ */
5046
+ export declare type QueryLanguage = string;
5047
+
5048
+ /** The raw concatenated strings that were sent to the semantic enrichment process. */
5049
+ export declare interface QueryResultDocumentRerankerInput {
5050
+ /**
5051
+ * The raw string for the title field that was used for semantic enrichment.
5052
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5053
+ */
5054
+ readonly title?: string;
5055
+ /**
5056
+ * The raw concatenated strings for the content fields that were used for semantic enrichment.
5057
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5058
+ */
5059
+ readonly content?: string;
5060
+ /**
5061
+ * The raw concatenated strings for the keyword fields that were used for semantic enrichment.
5062
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5063
+ */
5064
+ readonly keywords?: string;
5065
+ }
5066
+
5067
+ /** Description of fields that were sent to the semantic enrichment process, as well as how they were used */
5068
+ export declare interface QueryResultDocumentSemanticField {
5069
+ /**
5070
+ * The name of the field that was sent to the semantic enrichment process
5071
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5072
+ */
5073
+ readonly name?: string;
5074
+ /**
5075
+ * The way the field was used for the semantic enrichment process (fully used, partially used, or unused)
5076
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5077
+ */
5078
+ readonly state?: SemanticFieldState;
5079
+ }
5080
+
5081
+ /** The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were received. */
5082
+ export declare interface QueryResultDocumentSubscores {
5083
+ /**
5084
+ * The BM25 or Classic score for the text portion of the query.
5085
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5086
+ */
5087
+ readonly text?: TextResult;
5088
+ /**
5089
+ * The vector similarity and @search.score values for each vector query.
5090
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5091
+ */
5092
+ readonly vectors?: {
5093
+ [propertyName: string]: SingleVectorFieldResult;
5094
+ }[];
5095
+ /**
5096
+ * The BM25 or Classic score for the text portion of the query.
5097
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5098
+ */
5099
+ readonly documentBoost?: number;
5100
+ }
5101
+
5102
+ /** Defines options for query rewrites. */
5103
+ export declare type QueryRewrites = GenerativeQueryRewrites;
5104
+
5105
+ /** Contains debugging information specific to query rewrites. */
5106
+ export declare interface QueryRewritesDebugInfo {
5107
+ /**
5108
+ * List of query rewrites generated for the text query.
5109
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5110
+ */
5111
+ readonly text?: QueryRewritesValuesDebugInfo;
5112
+ /**
5113
+ * List of query rewrites generated for the vectorizable text queries.
5114
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5115
+ */
5116
+ readonly vectors?: QueryRewritesValuesDebugInfo[];
5117
+ }
5118
+
5119
+ /** Contains debugging information specific to query rewrites. */
5120
+ export declare interface QueryRewritesValuesDebugInfo {
5121
+ /**
5122
+ * The input text to the generative query rewriting model. There may be cases where the user query and the input to the generative model are not identical.
5123
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5124
+ */
5125
+ readonly inputQuery?: string;
5126
+ /**
5127
+ * List of query rewrites.
5128
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5129
+ */
5130
+ readonly rewrites?: string[];
5131
+ }
5132
+
5133
+ /**
5134
+ * Defines values for QuerySpellerType. \
5135
+ * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType,
5136
+ * this enum contains the known values that the service supports.
5137
+ * ### Known values supported by the service
5138
+ * **none**: Speller not enabled. \
5139
+ * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
5140
+ */
5141
+ export declare type QuerySpeller = string;
5142
+
3825
5143
  /** Defines values for QueryType. */
3826
5144
  export declare type QueryType = "simple" | "full" | "semantic";
3827
5145
 
3828
5146
  export declare type RegexFlags = `${KnownRegexFlags}`;
3829
5147
 
5148
+ /** Contains the options for rescoring. */
5149
+ export declare interface RescoringOptions {
5150
+ /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */
5151
+ enableRescoring?: boolean;
5152
+ /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */
5153
+ defaultOversampling?: number;
5154
+ /** Controls the storage method for original vectors. This setting is immutable. */
5155
+ rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod;
5156
+ }
5157
+
5158
+ /**
5159
+ * Options for reset docs operation.
5160
+ */
5161
+ export declare interface ResetDocumentsOptions extends OperationOptions {
5162
+ /** document keys to be reset */
5163
+ documentKeys?: string[];
5164
+ /** datasource document identifiers to be reset */
5165
+ datasourceDocumentIds?: string[];
5166
+ /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */
5167
+ overwrite?: boolean;
5168
+ }
5169
+
3830
5170
  /**
3831
5171
  * Options for reset indexer operation.
3832
5172
  */
3833
5173
  export declare type ResetIndexerOptions = OperationOptions;
3834
5174
 
5175
+ /**
5176
+ * Options for reset skills operation.
5177
+ */
5178
+ export declare interface ResetSkillsOptions extends OperationOptions {
5179
+ /** the names of skills to be reset. */
5180
+ skillNames?: string[];
5181
+ }
5182
+
3835
5183
  /** Represents a resource's usage and quota. */
3836
5184
  export declare interface ResourceCounter {
3837
5185
  /** The resource usage amount. */
@@ -3897,6 +5245,16 @@ export declare interface ScoringProfile {
3897
5245
  /** Defines values for ScoringStatistics. */
3898
5246
  export declare type ScoringStatistics = "local" | "global";
3899
5247
 
5248
+ /** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */
5249
+ export declare interface SearchAlias {
5250
+ /** The name of the alias. */
5251
+ name: string;
5252
+ /** The name of the index this alias maps to. Only one index name may be specified. */
5253
+ indexes: string[];
5254
+ /** The ETag of the alias. */
5255
+ etag?: string;
5256
+ }
5257
+
3900
5258
  /**
3901
5259
  * Class used to perform operations against a search index,
3902
5260
  * including querying documents in the index as well as
@@ -3925,6 +5283,10 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
3925
5283
  * A reference to the auto-generated SearchClient
3926
5284
  */
3927
5285
  private readonly client;
5286
+ /**
5287
+ * A reference to the internal HTTP pipeline for use with raw requests
5288
+ */
5289
+ readonly pipeline: Pipeline;
3928
5290
  /**
3929
5291
  * Creates an instance of SearchClient.
3930
5292
  *
@@ -4131,10 +5493,12 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
4131
5493
  private convertSelect;
4132
5494
  private convertVectorQueryFields;
4133
5495
  private convertSearchFields;
5496
+ private convertSemanticFields;
4134
5497
  private convertOrderBy;
4135
5498
  private convertQueryAnswers;
4136
5499
  private convertQueryCaptions;
4137
5500
  private convertVectorQuery;
5501
+ private convertQueryRewrites;
4138
5502
  }
4139
5503
 
4140
5504
  /**
@@ -4217,6 +5581,11 @@ export declare interface SearchDocumentsResultBase {
4217
5581
  * NOTE: This property will not be serialized. It can only be populated by the server.
4218
5582
  */
4219
5583
  readonly answers?: QueryAnswerResult[];
5584
+ /**
5585
+ * Debug information that applies to the search results as a whole.
5586
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5587
+ */
5588
+ readonly debugInfo?: DebugInfo;
4220
5589
  /**
4221
5590
  * Reason that a partial response was returned for a semantic search request.
4222
5591
  * NOTE: This property will not be serialized. It can only be populated by the server.
@@ -4227,6 +5596,11 @@ export declare interface SearchDocumentsResultBase {
4227
5596
  * NOTE: This property will not be serialized. It can only be populated by the server.
4228
5597
  */
4229
5598
  readonly semanticSearchResultsType?: SemanticSearchResultsType;
5599
+ /**
5600
+ * Type of query rewrite that was used to retrieve documents.
5601
+ * NOTE: This property will not be serialized. It can only be populated by the server.
5602
+ */
5603
+ readonly semanticQueryRewritesResultType?: SemanticQueryRewritesResultType;
4230
5604
  }
4231
5605
 
4232
5606
  /**
@@ -4329,6 +5703,10 @@ export declare interface SearchIndex {
4329
5703
  * The character filters for the index.
4330
5704
  */
4331
5705
  charFilters?: CharFilter[];
5706
+ /**
5707
+ * The normalizers for the index.
5708
+ */
5709
+ normalizers?: LexicalNormalizer[];
4332
5710
  /**
4333
5711
  * A description of an encryption key that you create in Azure Key Vault. This key is used to
4334
5712
  * provide an additional level of encryption-at-rest for your data when you want full assurance
@@ -4360,6 +5738,11 @@ export declare interface SearchIndex {
4360
5738
  etag?: string;
4361
5739
  }
4362
5740
 
5741
+ /**
5742
+ * Search Alias object.
5743
+ */
5744
+ export declare type SearchIndexAlias = SearchAlias;
5745
+
4363
5746
  /**
4364
5747
  * Class to perform operations to manage
4365
5748
  * (create, update, list/delete)
@@ -4384,6 +5767,10 @@ export declare class SearchIndexClient {
4384
5767
  * A reference to the auto-generated SearchServiceClient
4385
5768
  */
4386
5769
  private readonly client;
5770
+ /**
5771
+ * A reference to the internal HTTP pipeline for use with raw requests
5772
+ */
5773
+ readonly pipeline: Pipeline;
4387
5774
  /**
4388
5775
  * Used to authenticate requests to the service.
4389
5776
  */
@@ -4416,6 +5803,13 @@ export declare class SearchIndexClient {
4416
5803
  * @param options - Options to the list index operation.
4417
5804
  */
4418
5805
  listIndexes(options?: ListIndexesOptions): IndexIterator;
5806
+ private listAliasesPage;
5807
+ private listAliasesAll;
5808
+ /**
5809
+ * Lists all aliases available for a search service.
5810
+ * @param options - The options parameters.
5811
+ */
5812
+ listAliases(options?: ListAliasesOptions): AliasIterator;
4419
5813
  private listIndexesNamesPage;
4420
5814
  private listIndexesNamesAll;
4421
5815
  /**
@@ -4481,6 +5875,31 @@ export declare class SearchIndexClient {
4481
5875
  * @param options - Additional optional arguments.
4482
5876
  */
4483
5877
  deleteSynonymMap(synonymMap: string | SynonymMap, options?: DeleteSynonymMapOptions): Promise<void>;
5878
+ /**
5879
+ * Creates a new search alias or updates an alias if it already exists.
5880
+ * @param alias - The definition of the alias to create or update.
5881
+ * @param options - The options parameters.
5882
+ */
5883
+ createOrUpdateAlias(alias: SearchIndexAlias, options?: CreateOrUpdateAliasOptions): Promise<SearchIndexAlias>;
5884
+ /**
5885
+ * Creates a new search alias.
5886
+ * @param alias - The definition of the alias to create.
5887
+ * @param options - The options parameters.
5888
+ */
5889
+ createAlias(alias: SearchIndexAlias, options?: CreateAliasOptions): Promise<SearchIndexAlias>;
5890
+ /**
5891
+ * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no
5892
+ * recovery option. The mapped index is untouched by this operation.
5893
+ * @param alias - Alias/Name name of the alias to delete.
5894
+ * @param options - The options parameters.
5895
+ */
5896
+ deleteAlias(alias: string | SearchIndexAlias, options?: DeleteAliasOptions): Promise<void>;
5897
+ /**
5898
+ * Retrieves an alias definition.
5899
+ * @param aliasName - The name of the alias to retrieve.
5900
+ * @param options - The options parameters.
5901
+ */
5902
+ getAlias(aliasName: string, options?: GetAliasOptions): Promise<SearchIndexAlias>;
4484
5903
  /**
4485
5904
  * Retrieves statistics about an index, such as the count of documents and the size
4486
5905
  * of index storage.
@@ -4594,6 +6013,29 @@ export declare interface SearchIndexer {
4594
6013
  * paid services created on or after January 1, 2019.
4595
6014
  */
4596
6015
  encryptionKey?: SearchResourceEncryptionKey;
6016
+ /**
6017
+ * Adds caching to an enrichment pipeline to allow for incremental modification steps without
6018
+ * having to rebuild the index every time.
6019
+ */
6020
+ cache?: SearchIndexerCache;
6021
+ }
6022
+
6023
+ export declare interface SearchIndexerCache {
6024
+ /**
6025
+ * The connection string to the storage account where the cache data will be persisted.
6026
+ */
6027
+ storageConnectionString?: string;
6028
+ /**
6029
+ * Specifies whether incremental reprocessing is enabled.
6030
+ */
6031
+ enableReprocessing?: boolean;
6032
+ /** The user-assigned managed identity used for connections to the enrichment cache. If the
6033
+ * connection string indicates an identity (ResourceId) and it's not specified, the
6034
+ * system-assigned managed identity is used. On updates to the indexer, if the identity is
6035
+ * unspecified, the value remains unchanged. If set to "none", the value of this property is
6036
+ * cleared.
6037
+ */
6038
+ identity?: SearchIndexerDataIdentity;
4597
6039
  }
4598
6040
 
4599
6041
  /**
@@ -4620,6 +6062,10 @@ export declare class SearchIndexerClient {
4620
6062
  * A reference to the auto-generated SearchServiceClient
4621
6063
  */
4622
6064
  private readonly client;
6065
+ /**
6066
+ * A reference to the internal HTTP pipeline for use with raw requests
6067
+ */
6068
+ readonly pipeline: Pipeline;
4623
6069
  /**
4624
6070
  * Creates an instance of SearchIndexerClient.
4625
6071
  *
@@ -4757,6 +6203,19 @@ export declare class SearchIndexerClient {
4757
6203
  * @param options - Additional optional arguments.
4758
6204
  */
4759
6205
  runIndexer(indexerName: string, options?: RunIndexerOptions): Promise<void>;
6206
+ /**
6207
+ * Resets specific documents in the datasource to be selectively re-ingested by the indexer.
6208
+ * @param indexerName - The name of the indexer to reset documents for.
6209
+ * @param options - Additional optional arguments.
6210
+ */
6211
+ resetDocuments(indexerName: string, options?: ResetDocumentsOptions): Promise<void>;
6212
+ /**
6213
+ * Reset an existing skillset in a search service.
6214
+ * @param skillsetName - The name of the skillset to reset.
6215
+ * @param skillNames - The names of skills to reset.
6216
+ * @param options - The options parameters.
6217
+ */
6218
+ resetSkills(skillsetName: string, options?: ResetSkillsOptions): Promise<void>;
4760
6219
  }
4761
6220
 
4762
6221
  /**
@@ -5023,7 +6482,7 @@ export declare interface SearchIndexerLimits {
5023
6482
  /**
5024
6483
  * Contains the possible cases for Skill.
5025
6484
  */
5026
- export declare type SearchIndexerSkill = AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill;
6485
+ export declare type SearchIndexerSkill = AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | DocumentIntelligenceLayoutSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | VisionVectorizeSkill | WebApiSkill;
5027
6486
 
5028
6487
  /**
5029
6488
  * A list of skills.
@@ -5478,8 +6937,21 @@ export declare type SearchResult<TModel extends object, TFields extends SelectFi
5478
6937
  */
5479
6938
  readonly captions?: QueryCaptionResult[];
5480
6939
  document: NarrowedModel<TModel, TFields>;
6940
+ /**
6941
+ * Contains debugging information that can be used to further explore your search results.
6942
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6943
+ */
6944
+ readonly documentDebugInfo?: DocumentDebugInfo;
5481
6945
  };
5482
6946
 
6947
+ /** The results of the vector query will filter based on the '\@search.score' value. Note this is the \@search.score returned as part of the search response. The threshold direction will be chosen for higher \@search.score. */
6948
+ export declare interface SearchScoreThreshold extends BaseVectorThreshold {
6949
+ /** Polymorphic discriminator, which specifies the different types this object can be */
6950
+ kind: "searchScore";
6951
+ /** The threshold will filter based on the '\@search.score' value. Note this is the \@search.score returned as part of the search response. The threshold direction will be chosen for higher \@search.score. */
6952
+ value: number;
6953
+ }
6954
+
5483
6955
  /**
5484
6956
  * Response from a get service statistics request. If successful, it includes service level
5485
6957
  * counters and limits.
@@ -5527,6 +6999,32 @@ export declare interface SemanticConfiguration {
5527
6999
  prioritizedFields: SemanticPrioritizedFields;
5528
7000
  }
5529
7001
 
7002
+ /**
7003
+ * Debug options for semantic search queries.
7004
+ */
7005
+ export declare interface SemanticDebugInfo {
7006
+ /**
7007
+ * The title field that was sent to the semantic enrichment process, as well as how it was used
7008
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7009
+ */
7010
+ readonly titleField?: QueryResultDocumentSemanticField;
7011
+ /**
7012
+ * The content fields that were sent to the semantic enrichment process, as well as how they were used
7013
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7014
+ */
7015
+ readonly contentFields?: QueryResultDocumentSemanticField[];
7016
+ /**
7017
+ * The keyword fields that were sent to the semantic enrichment process, as well as how they were used
7018
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7019
+ */
7020
+ readonly keywordFields?: QueryResultDocumentSemanticField[];
7021
+ /**
7022
+ * The raw concatenated strings that were sent to the semantic enrichment process.
7023
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7024
+ */
7025
+ readonly rerankerInput?: QueryResultDocumentRerankerInput;
7026
+ }
7027
+
5530
7028
  export declare type SemanticErrorMode = `${KnownSemanticErrorMode}`;
5531
7029
 
5532
7030
  export declare type SemanticErrorReason = `${KnownSemanticErrorReason}`;
@@ -5536,6 +7034,17 @@ export declare interface SemanticField {
5536
7034
  name: string;
5537
7035
  }
5538
7036
 
7037
+ /**
7038
+ * Defines values for SemanticFieldState. \
7039
+ * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState,
7040
+ * this enum contains the known values that the service supports.
7041
+ * ### Known values supported by the service
7042
+ * **used**: The field was fully used for semantic enrichment. \
7043
+ * **unused**: The field was not used for semantic enrichment. \
7044
+ * **partial**: The field was partially used for semantic enrichment.
7045
+ */
7046
+ export declare type SemanticFieldState = string;
7047
+
5539
7048
  /** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
5540
7049
  export declare interface SemanticPrioritizedFields {
5541
7050
  /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
@@ -5546,6 +7055,15 @@ export declare interface SemanticPrioritizedFields {
5546
7055
  keywordsFields?: SemanticField[];
5547
7056
  }
5548
7057
 
7058
+ /**
7059
+ * Defines values for SemanticQueryRewritesResultType. \
7060
+ * {@link KnownSemanticQueryRewritesResultType} can be used interchangeably with SemanticQueryRewritesResultType,
7061
+ * this enum contains the known values that the service supports.
7062
+ * ### Known values supported by the service
7063
+ * **originalQueryOnly**: Query rewrites were not successfully generated for this request. Only the original query was used to retrieve the results.
7064
+ */
7065
+ export declare type SemanticQueryRewritesResultType = string;
7066
+
5549
7067
  /** Defines parameters for a search index that influence semantic capabilities. */
5550
7068
  export declare interface SemanticSearch {
5551
7069
  /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */
@@ -5583,12 +7101,25 @@ export declare interface SemanticSearchOptions {
5583
7101
  * to 'None'.
5584
7102
  */
5585
7103
  captions?: QueryCaption;
7104
+ /**
7105
+ * When QueryRewrites is set to `generative`, the query terms are sent to a generate model which will
7106
+ * produce 10 (default) rewrites to help increase the recall of the request. Defaults to `none`.
7107
+ */
7108
+ queryRewrites?: QueryRewrites;
5586
7109
  /**
5587
7110
  * Allows setting a separate search query that will be solely used for semantic reranking,
5588
7111
  * semantic captions and semantic answers. Is useful for scenarios where there is a need to use
5589
7112
  * different queries between the base retrieval and ranking phase, and the L2 semantic phase.
5590
7113
  */
5591
7114
  semanticQuery?: string;
7115
+ /**
7116
+ * The list of field names used for semantic search.
7117
+ */
7118
+ semanticFields?: string[];
7119
+ /**
7120
+ * Enables a debugging tool that can be used to further explore your search results.
7121
+ */
7122
+ debugMode?: QueryDebugMode;
5592
7123
  }
5593
7124
 
5594
7125
  export declare type SemanticSearchResultsType = `${KnownSemanticSearchResultsType}`;
@@ -5621,6 +7152,8 @@ export declare interface SentimentSkillV3 extends BaseSearchIndexerSkill {
5621
7152
 
5622
7153
  /** Represents service-level resource counters and quotas. */
5623
7154
  export declare interface ServiceCounters {
7155
+ /** Total number of aliases. */
7156
+ aliasCounter: ResourceCounter;
5624
7157
  /** Total number of documents across all indexes in the service. */
5625
7158
  documentCounter: ResourceCounter;
5626
7159
  /** Total number of indexes. */
@@ -5723,9 +7256,10 @@ export declare interface SimpleField {
5723
7256
  * returned in a search result. You can disable this option if you don't plan to return the field
5724
7257
  * contents in a search response to save on storage overhead. This can only be set during index
5725
7258
  * creation and only for vector fields. This property cannot be changed for existing fields or set
5726
- * as false for new fields. If this property is set as false, the property 'hidden' must be set to
5727
- * 'true'. This property must be false or unset for key fields, for new fields, and for non-vector
5728
- * fields. Disabling this property will reduce index storage requirements.
7259
+ * as false for new fields. If this property is set to `false`, the property `hidden` must be set to
7260
+ * `true`. This property must be true or unset for key fields, for new fields, and for non-vector
7261
+ * fields, and it must be null for complex fields. Disabling this property will reduce index
7262
+ * storage requirements. The default is true for vector fields.
5729
7263
  */
5730
7264
  stored?: boolean;
5731
7265
  /**
@@ -5733,50 +7267,50 @@ export declare interface SimpleField {
5733
7267
  * analysis such as word-breaking during indexing. If you set a searchable field to a value like
5734
7268
  * "sunny day", internally it will be split into the individual tokens "sunny" and "day". This
5735
7269
  * enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String)
5736
- * are searchable by default. This property must be false for simple fields of other non-string
5737
- * data types. Note: searchable fields consume extra space
5738
- * in your index to accommodate additional tokenized versions of the field value for full-text
7270
+ * are searchable by default. This property must be false for simple
7271
+ * fields of other non-string data types.
7272
+ * Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text
5739
7273
  * searches. If you want to save space in your index and you don't need a field to be included in
5740
7274
  * searches, set searchable to false. Default is false.
5741
7275
  */
5742
7276
  searchable?: boolean;
5743
7277
  /**
5744
- * A value indicating whether to enable the field to be referenced in $filter queries. filterable
5745
- * differs from searchable in how strings are handled. Fields of type Edm.String or
5746
- * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for
5747
- * exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny'
5748
- * will find no matches, but $filter=f eq 'sunny day' will. Default is false.
7278
+ * A value indicating whether to enable the field to be referenced in $filter queries. `filterable`
7279
+ * differs from `searchable` in how strings are handled. Fields of type Edm.String or
7280
+ * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are
7281
+ * for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq
7282
+ * 'sunny' will find no matches, but $filter=f eq 'sunny day' will.
7283
+ * Default is false.
5749
7284
  */
5750
7285
  filterable?: boolean;
5751
7286
  /**
5752
7287
  * A value indicating whether to enable the field to be referenced in $orderby expressions. By
5753
- * default, the search engine sorts results by score, but in many experiences users will want to
5754
- * sort by fields in the documents. A simple field can be sortable only if it is single-valued (it
5755
- * has a single value in the scope of the parent document). Simple collection fields cannot be
5756
- * sortable, since they are multi-valued. Simple sub-fields of complex collections are also
7288
+ * default, the service sorts results by score, but in many experiences users will want
7289
+ * to sort by fields in the documents. A simple field can be sortable only if it is single-valued
7290
+ * (it has a single value in the scope of the parent document). Simple collection fields cannot
7291
+ * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also
5757
7292
  * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent
5758
7293
  * field, or an ancestor field, that's the complex collection. The default is false.
5759
- *
5760
7294
  */
5761
7295
  sortable?: boolean;
5762
7296
  /**
5763
7297
  * A value indicating whether to enable the field to be referenced in facet queries. Typically
5764
7298
  * used in a presentation of search results that includes hit count by category (for example,
5765
- * search for digital cameras and see hits by brand, by megapixels, by price, and so on). Fields
5766
- * of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is
5767
- * false.
7299
+ * search for digital cameras and see hits by brand, by megapixels, by price, and so on).
7300
+ * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable.
7301
+ * Default is false for all other simple fields.
5768
7302
  */
5769
7303
  facetable?: boolean;
5770
7304
  /**
5771
- * The name of the analyzer to use for the field. This option can be used only with searchable
5772
- * fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the
5773
- * analyzer is chosen, it cannot be changed for the field.
7305
+ * The name of the analyzer to use for the field. This option can be used only with
7306
+ * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.
7307
+ * Once the analyzer is chosen, it cannot be changed for the field.
5774
7308
  */
5775
7309
  analyzerName?: LexicalAnalyzerName;
5776
7310
  /**
5777
7311
  * The name of the analyzer used at search time for the field. This option can be used only with
5778
- * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set
5779
- * together with the `analyzerName` option. This property cannot be set to the name of a language
7312
+ * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set together
7313
+ * with the `analyzerName` option. This property cannot be set to the name of a language
5780
7314
  * analyzer; use the `analyzerName` property instead if you need a language analyzer. This
5781
7315
  * analyzer can be updated on an existing field.
5782
7316
  */
@@ -5784,18 +7318,22 @@ export declare interface SimpleField {
5784
7318
  /**
5785
7319
  * The name of the analyzer used at indexing time for the field. This option can be used only
5786
7320
  * with searchable fields. It must be set together with searchAnalyzer and it cannot be set
5787
- * together with the analyzer option. This property cannot be set to the name of a language
5788
- * analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer
5789
- * is chosen, it cannot be changed for the field.
7321
+ * together with the `analyzerName` option. Once the analyzer is chosen, it cannot be changed for the
7322
+ * field. KnownAnalyzerNames is an enum containing known values.
5790
7323
  */
5791
7324
  indexAnalyzerName?: LexicalAnalyzerName;
5792
7325
  /**
5793
7326
  * A list of the names of synonym maps to associate with this field. This option can be used only
5794
7327
  * with searchable fields. Currently only one synonym map per field is supported. Assigning a
5795
- * synonym map to a field ensures that query terms targeting that field are expanded at query-time
5796
- * using the rules in the synonym map. This attribute can be changed on existing fields.
7328
+ * synonym map to a field ensures that query terms targeting that field are expanded at
7329
+ * query-time using the rules in the synonym map. This attribute can be changed on existing
7330
+ * fields.
5797
7331
  */
5798
7332
  synonymMapNames?: string[];
7333
+ /**
7334
+ * The name of the normalizer used at indexing time for the field.
7335
+ */
7336
+ normalizerName?: LexicalNormalizerName;
5799
7337
  /**
5800
7338
  * The dimensionality of the vector field.
5801
7339
  */
@@ -5811,6 +7349,20 @@ export declare interface SimpleField {
5811
7349
  vectorEncodingFormat?: VectorEncodingFormat;
5812
7350
  }
5813
7351
 
7352
+ /** A single vector field result. Both @search.score and vector similarity values are returned. Vector similarity is related to @search.score by an equation. */
7353
+ export declare interface SingleVectorFieldResult {
7354
+ /**
7355
+ * The @search.score value that is calculated from the vector similarity score. This is the score that's visible in a pure single-field single-vector query.
7356
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7357
+ */
7358
+ readonly searchScore?: number;
7359
+ /**
7360
+ * The vector similarity score for this document. Note this is the canonical definition of similarity metric, not the 'distance' version. For example, cosine similarity instead of cosine distance.
7361
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7362
+ */
7363
+ readonly vectorSimilarity?: number;
7364
+ }
7365
+
5814
7366
  /** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */
5815
7367
  export declare interface SnowballTokenFilter extends BaseTokenFilter {
5816
7368
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -5836,16 +7388,46 @@ export declare interface SoftDeleteColumnDeletionDetectionPolicy extends BaseDat
5836
7388
  export declare interface SplitSkill extends BaseSearchIndexerSkill {
5837
7389
  /** Polymorphic discriminator, which specifies the different types this object can be */
5838
7390
  odatatype: "#Microsoft.Skills.Text.SplitSkill";
5839
- /** A value indicating which language code to use. Default is en. */
7391
+ /** A value indicating which language code to use. Default is `en`. */
5840
7392
  defaultLanguageCode?: SplitSkillLanguage;
5841
7393
  /** A value indicating which split mode to perform. */
5842
7394
  textSplitMode?: TextSplitMode;
5843
7395
  /** The desired maximum page length. Default is 10000. */
5844
7396
  maxPageLength?: number;
7397
+ /** Only applicable when textSplitMode is set to 'pages'. If specified, n+1th chunk will start with this number of characters/tokens from the end of the nth chunk. */
7398
+ pageOverlapLength?: number;
7399
+ /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */
7400
+ maximumPagesToTake?: number;
7401
+ /** Only applies if textSplitMode is set to pages. There are two possible values. The choice of the values will decide the length (maximumPageLength and pageOverlapLength) measurement. The default is 'characters', which means the length will be measured by character. */
7402
+ unit?: SplitSkillUnit;
7403
+ /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */
7404
+ azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters;
5845
7405
  }
5846
7406
 
7407
+ /**
7408
+ * Defines values for SplitSkillEncoderModelName. \
7409
+ * {@link KnownSplitSkillEncoderModelName} can be used interchangeably with SplitSkillEncoderModelName,
7410
+ * this enum contains the known values that the service supports.
7411
+ * ### Known values supported by the service
7412
+ * **r50k_base**: Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. \
7413
+ * **p50k_base**: A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. \
7414
+ * **p50k_edit**: Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. \
7415
+ * **cl100k_base**: A base model with a 100,000 token vocabulary.
7416
+ */
7417
+ export declare type SplitSkillEncoderModelName = string;
7418
+
5847
7419
  export declare type SplitSkillLanguage = `${KnownSplitSkillLanguage}`;
5848
7420
 
7421
+ /**
7422
+ * Defines values for SplitSkillUnit. \
7423
+ * {@link KnownSplitSkillUnit} can be used interchangeably with SplitSkillUnit,
7424
+ * this enum contains the known values that the service supports.
7425
+ * ### Known values supported by the service
7426
+ * **characters**: The length will be measured by character. \
7427
+ * **azureOpenAITokens**: The length will be measured by an AzureOpenAI tokenizer from the tiktoken library.
7428
+ */
7429
+ export declare type SplitSkillUnit = string;
7430
+
5849
7431
  /** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
5850
7432
  export declare interface SqlIntegratedChangeTrackingPolicy extends BaseDataChangeDetectionPolicy {
5851
7433
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -6047,6 +7629,15 @@ export declare interface TagScoringParameters {
6047
7629
  tagsParameter: string;
6048
7630
  }
6049
7631
 
7632
+ /** The BM25 or Classic score for the text portion of the query. */
7633
+ export declare interface TextResult {
7634
+ /**
7635
+ * The BM25 or Classic score for the text portion of the query.
7636
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7637
+ */
7638
+ readonly searchScore?: number;
7639
+ }
7640
+
6050
7641
  export declare type TextSplitMode = `${KnownTextSplitMode}`;
6051
7642
 
6052
7643
  /** A skill to translate text from one language to another. */
@@ -6071,6 +7662,18 @@ export declare interface TextWeights {
6071
7662
  };
6072
7663
  }
6073
7664
 
7665
+ /**
7666
+ * Specifies the properties for connecting to an AML vectorizer with a managed identity.
7667
+ */
7668
+ export declare interface TokenAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
7669
+ /** Indicates how the service should attempt to identify itself to the AML instance */
7670
+ authKind: "token";
7671
+ /** The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/\{guid\}/resourceGroups/\{resource-group-name\}/Microsoft.MachineLearningServices/workspaces/\{workspace-name\}/services/\{service_name\}. */
7672
+ resourceId: string;
7673
+ /** The region the AML service is deployed in. */
7674
+ region?: string;
7675
+ }
7676
+
6074
7677
  /** Defines values for TokenCharacterKind. */
6075
7678
  export declare type TokenCharacterKind = "letter" | "digit" | "whitespace" | "punctuation" | "symbol";
6076
7679
 
@@ -6163,12 +7766,33 @@ export declare type VectorEncodingFormat = string;
6163
7766
 
6164
7767
  export declare type VectorFilterMode = `${KnownVectorFilterMode}`;
6165
7768
 
7769
+ /** The query parameters to use for vector search when a base 64 encoded binary of an image that needs to be vectorized is provided. */
7770
+ export declare interface VectorizableImageBinaryQuery<TModel extends object> extends BaseVectorQuery<TModel> {
7771
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7772
+ kind: "imageBinary";
7773
+ /** The base64 encoded binary of an image to be vectorized to perform a vector search query. */
7774
+ binaryImage: string;
7775
+ }
7776
+
7777
+ /** The query parameters to use for vector search when an url that represents an image value that needs to be vectorized is provided. */
7778
+ export declare interface VectorizableImageUrlQuery<TModel extends object> extends BaseVectorQuery<TModel> {
7779
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7780
+ kind: "imageUrl";
7781
+ /** The URL of an image to be vectorized to perform a vector search query. */
7782
+ url: string;
7783
+ }
7784
+
6166
7785
  /** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
6167
7786
  export declare interface VectorizableTextQuery<TModel extends object> extends BaseVectorQuery<TModel> {
6168
7787
  /** Polymorphic discriminator, which specifies the different types this object can be */
6169
7788
  kind: "text";
6170
7789
  /** The text to be vectorized to perform a vector search query. */
6171
7790
  text: string;
7791
+ /**
7792
+ * Can be configured to let a generative model rewrite the query before sending it to be
7793
+ * vectorized.
7794
+ */
7795
+ queryRewrites?: QueryRewrites;
6172
7796
  }
6173
7797
 
6174
7798
  /** The query parameters to use for vector search when a raw vector value is provided. */
@@ -6180,19 +7804,30 @@ export declare interface VectorizedQuery<TModel extends object> extends BaseVect
6180
7804
  }
6181
7805
 
6182
7806
  /** The query parameters for vector and hybrid search queries. */
6183
- export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel>;
7807
+ export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel> | VectorizableImageUrlQuery<TModel> | VectorizableImageBinaryQuery<TModel>;
6184
7808
 
6185
7809
  export declare type VectorQueryKind = `${KnownVectorQueryKind}`;
6186
7810
 
7811
+ export declare interface VectorsDebugInfo {
7812
+ /**
7813
+ * The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF.
7814
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7815
+ */
7816
+ readonly subscores?: QueryResultDocumentSubscores;
7817
+ }
7818
+
6187
7819
  /** Contains configuration options related to vector search. */
6188
7820
  export declare interface VectorSearch {
6189
7821
  /** Defines combinations of configurations to use with vector search. */
6190
7822
  profiles?: VectorSearchProfile[];
6191
- /** Contains configuration options specific to the algorithm used during indexing or querying. */
7823
+ /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
6192
7824
  algorithms?: VectorSearchAlgorithmConfiguration[];
6193
7825
  /** Contains configuration options on how to vectorize text vector queries. */
6194
7826
  vectorizers?: VectorSearchVectorizer[];
6195
- /** Contains configuration options specific to the compression method used during indexing or querying. */
7827
+ /**
7828
+ * Contains configuration options specific to the compression method used during indexing or
7829
+ * querying.
7830
+ */
6196
7831
  compressions?: VectorSearchCompression[];
6197
7832
  }
6198
7833
 
@@ -6216,6 +7851,16 @@ export declare type VectorSearchCompression = BinaryQuantizationCompression | Sc
6216
7851
  */
6217
7852
  export declare type VectorSearchCompressionKind = string;
6218
7853
 
7854
+ /**
7855
+ * Defines values for VectorSearchCompressionRescoreStorageMethod. \
7856
+ * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod,
7857
+ * this enum contains the known values that the service supports.
7858
+ * ### Known values supported by the service
7859
+ * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \
7860
+ * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality.
7861
+ */
7862
+ export declare type VectorSearchCompressionRescoreStorageMethod = string;
7863
+
6219
7864
  /**
6220
7865
  * Defines values for VectorSearchCompressionTarget. \
6221
7866
  * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget,
@@ -6253,7 +7898,7 @@ export declare interface VectorSearchProfile {
6253
7898
  }
6254
7899
 
6255
7900
  /** Contains configuration options on how to vectorize text vector queries. */
6256
- export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVectorizer;
7901
+ export declare type VectorSearchVectorizer = AIServicesVisionVectorizer | AzureMachineLearningVectorizer | AzureOpenAIVectorizer | WebApiVectorizer;
6257
7902
 
6258
7903
  /**
6259
7904
  * Defines values for VectorSearchVectorizerKind. \
@@ -6261,10 +7906,31 @@ export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVecto
6261
7906
  * this enum contains the known values that the service supports.
6262
7907
  * ### Known values supported by the service
6263
7908
  * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \
6264
- * **customWebApi**: Generate embeddings using a custom web endpoint at query time.
7909
+ * **customWebApi**: Generate embeddings using a custom web endpoint at query time. \
7910
+ * **aiServicesVision**: Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. \
7911
+ * **aml**: Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog at query time.
6265
7912
  */
6266
7913
  export declare type VectorSearchVectorizerKind = string;
6267
7914
 
7915
+ /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
7916
+ export declare interface VectorSimilarityThreshold extends BaseVectorThreshold {
7917
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7918
+ kind: "vectorSimilarity";
7919
+ /** The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
7920
+ value: number;
7921
+ }
7922
+
7923
+ /** The threshold used for vector queries. */
7924
+ export declare type VectorThreshold = VectorSimilarityThreshold | SearchScoreThreshold;
7925
+
7926
+ /** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */
7927
+ export declare interface VisionVectorizeSkill extends BaseSearchIndexerSkill {
7928
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7929
+ odatatype: "#Microsoft.Skills.Vision.VectorizeSkill";
7930
+ /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */
7931
+ modelVersion?: string;
7932
+ }
7933
+
6268
7934
  export declare type VisualFeature = `${KnownVisualFeature}`;
6269
7935
 
6270
7936
  /** Specifies the properties for connecting to a user-defined vectorizer. */