@azure/search-documents 12.1.0 → 12.2.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. package/README.md +1 -1
  2. package/dist/index.js +2298 -426
  3. package/dist/index.js.map +1 -1
  4. package/dist-esm/src/base64.browser.js +1 -1
  5. package/dist-esm/src/base64.browser.js.map +1 -1
  6. package/dist-esm/src/base64.js +1 -1
  7. package/dist-esm/src/base64.js.map +1 -1
  8. package/dist-esm/src/errorModels.js +1 -1
  9. package/dist-esm/src/errorModels.js.map +1 -1
  10. package/dist-esm/src/generated/data/models/index.js +208 -6
  11. package/dist-esm/src/generated/data/models/index.js.map +1 -1
  12. package/dist-esm/src/generated/data/models/mappers.js +378 -0
  13. package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
  14. package/dist-esm/src/generated/data/models/parameters.js +42 -0
  15. package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
  16. package/dist-esm/src/generated/data/operations/documents.js +4 -0
  17. package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
  18. package/dist-esm/src/generated/data/searchClient.js +1 -1
  19. package/dist-esm/src/generated/data/searchClient.js.map +1 -1
  20. package/dist-esm/src/generated/service/models/index.js +154 -84
  21. package/dist-esm/src/generated/service/models/index.js.map +1 -1
  22. package/dist-esm/src/generated/service/models/mappers.js +684 -70
  23. package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
  24. package/dist-esm/src/generated/service/models/parameters.js +51 -1
  25. package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
  26. package/dist-esm/src/generated/service/operations/aliases.js +160 -0
  27. package/dist-esm/src/generated/service/operations/aliases.js.map +1 -0
  28. package/dist-esm/src/generated/service/operations/dataSources.js +4 -1
  29. package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
  30. package/dist-esm/src/generated/service/operations/index.js +1 -0
  31. package/dist-esm/src/generated/service/operations/index.js.map +1 -1
  32. package/dist-esm/src/generated/service/operations/indexers.js +29 -1
  33. package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
  34. package/dist-esm/src/generated/service/operations/skillsets.js +30 -1
  35. package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
  36. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js +9 -0
  37. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +1 -0
  38. package/dist-esm/src/generated/service/operationsInterfaces/index.js +1 -0
  39. package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +1 -1
  40. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
  41. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  42. package/dist-esm/src/generated/service/searchServiceClient.js +3 -2
  43. package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
  44. package/dist-esm/src/geographyPoint.js +1 -1
  45. package/dist-esm/src/geographyPoint.js.map +1 -1
  46. package/dist-esm/src/index.js +4 -4
  47. package/dist-esm/src/index.js.map +1 -1
  48. package/dist-esm/src/indexDocumentsBatch.js +1 -1
  49. package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
  50. package/dist-esm/src/indexModels.js +1 -1
  51. package/dist-esm/src/indexModels.js.map +1 -1
  52. package/dist-esm/src/logger.js +1 -1
  53. package/dist-esm/src/logger.js.map +1 -1
  54. package/dist-esm/src/odata.js +1 -1
  55. package/dist-esm/src/odata.js.map +1 -1
  56. package/dist-esm/src/odataMetadataPolicy.js +1 -1
  57. package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
  58. package/dist-esm/src/searchApiKeyCredentialPolicy.js +1 -1
  59. package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
  60. package/dist-esm/src/searchAudience.js +1 -1
  61. package/dist-esm/src/searchAudience.js.map +1 -1
  62. package/dist-esm/src/searchClient.js +11 -4
  63. package/dist-esm/src/searchClient.js.map +1 -1
  64. package/dist-esm/src/searchIndexClient.js +153 -4
  65. package/dist-esm/src/searchIndexClient.js.map +1 -1
  66. package/dist-esm/src/searchIndexerClient.js +48 -1
  67. package/dist-esm/src/searchIndexerClient.js.map +1 -1
  68. package/dist-esm/src/searchIndexingBufferedSender.js +1 -1
  69. package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
  70. package/dist-esm/src/serialization.js +1 -1
  71. package/dist-esm/src/serialization.js.map +1 -1
  72. package/dist-esm/src/serviceModels.js +1 -1
  73. package/dist-esm/src/serviceModels.js.map +1 -1
  74. package/dist-esm/src/serviceUtils.js +79 -19
  75. package/dist-esm/src/serviceUtils.js.map +1 -1
  76. package/dist-esm/src/synonymMapHelper.browser.js +1 -1
  77. package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
  78. package/dist-esm/src/synonymMapHelper.js +1 -1
  79. package/dist-esm/src/synonymMapHelper.js.map +1 -1
  80. package/dist-esm/src/tracing.js +1 -1
  81. package/dist-esm/src/tracing.js.map +1 -1
  82. package/dist-esm/src/walk.js +1 -1
  83. package/dist-esm/src/walk.js.map +1 -1
  84. package/package.json +6 -6
  85. package/types/search-documents.d.ts +1515 -98
@@ -5,9 +5,51 @@ import { ExtendedCommonClientOptions } from '@azure/core-http-compat';
5
5
  import { KeyCredential } from '@azure/core-auth';
6
6
  import { OperationOptions } from '@azure/core-client';
7
7
  import { PagedAsyncIterableIterator } from '@azure/core-paging';
8
+ import { Pipeline } from '@azure/core-rest-pipeline';
8
9
  import { RestError } from '@azure/core-rest-pipeline';
9
10
  import { TokenCredential } from '@azure/core-auth';
10
11
 
12
+ /** Specifies the AI Services Vision parameters for vectorizing a query image or text. */
13
+ export declare interface AIServicesVisionParameters {
14
+ /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */
15
+ modelVersion?: string;
16
+ /** The resource URI of the AI Services resource. */
17
+ resourceUri: string;
18
+ /** API key of the designated AI Services resource. */
19
+ apiKey?: string;
20
+ /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
21
+ authIdentity?: SearchIndexerDataIdentity;
22
+ }
23
+
24
+ /** Specifies the AI Services Vision parameters for vectorizing a query image or text. */
25
+ export declare interface AIServicesVisionVectorizer extends BaseVectorSearchVectorizer {
26
+ /** Polymorphic discriminator, which specifies the different types this object can be */
27
+ kind: "aiServicesVision";
28
+ /** Contains the parameters specific to AI Services Vision embedding vectorization. */
29
+ parameters?: AIServicesVisionParameters;
30
+ }
31
+
32
+ /**
33
+ * Defines values for AIStudioModelCatalogName. \
34
+ * {@link KnownAIStudioModelCatalogName} can be used interchangeably with AIStudioModelCatalogName,
35
+ * this enum contains the known values that the service supports.
36
+ * ### Known values supported by the service
37
+ * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32** \
38
+ * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336** \
39
+ * **Facebook-DinoV2-Image-Embeddings-ViT-Base** \
40
+ * **Facebook-DinoV2-Image-Embeddings-ViT-Giant** \
41
+ * **Cohere-embed-v3-english** \
42
+ * **Cohere-embed-v3-multilingual**
43
+ */
44
+ export declare type AIStudioModelCatalogName = string;
45
+
46
+ /**
47
+ * An iterator for listing the aliases that exist in the Search service. This will make requests
48
+ * as needed during iteration. Use .byPage() to make one request to the server
49
+ * per iteration.
50
+ */
51
+ export declare type AliasIterator = PagedAsyncIterableIterator<SearchIndexAlias, SearchIndexAlias[], {}>;
52
+
11
53
  /** Information about a token returned by an analyzer. */
12
54
  export declare interface AnalyzedTokenInfo {
13
55
  /**
@@ -54,6 +96,11 @@ export declare interface AnalyzeRequest {
54
96
  * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.
55
97
  */
56
98
  tokenizerName?: LexicalTokenizerName;
99
+ /**
100
+ * The name of the normalizer to use to normalize the given text. {@link KnownNormalizerNames} is
101
+ * an enum containing built-in normalizer names.
102
+ */
103
+ normalizerName?: LexicalNormalizerName;
57
104
  /**
58
105
  * An optional list of token filters to use when breaking the given text. This parameter can only
59
106
  * be set when using the tokenizer parameter.
@@ -183,6 +230,35 @@ export declare interface AzureActiveDirectoryApplicationCredentials {
183
230
 
184
231
  export { AzureKeyCredential }
185
232
 
233
+ /** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */
234
+ export declare interface AzureMachineLearningSkill extends BaseSearchIndexerSkill {
235
+ /** Polymorphic discriminator, which specifies the different types this object can be */
236
+ odatatype: "#Microsoft.Skills.Custom.AmlSkill";
237
+ /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
238
+ scoringUri?: string;
239
+ /** (Required for key authentication) The key for the AML service. */
240
+ authenticationKey?: string;
241
+ /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */
242
+ resourceId?: string;
243
+ /** (Optional) When specified, indicates the timeout for the http client making the API call. */
244
+ timeout?: string;
245
+ /** (Optional for token authentication). The region the AML service is deployed in. */
246
+ region?: string;
247
+ /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */
248
+ degreeOfParallelism?: number;
249
+ }
250
+
251
+ /** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog for generating the vector embedding of a query string. */
252
+ export declare interface AzureMachineLearningVectorizer extends BaseVectorSearchVectorizer {
253
+ /** Polymorphic discriminator, which specifies the different types this object can be */
254
+ kind: "aml";
255
+ /** Specifies the properties of the AML vectorizer. */
256
+ amlParameters?: AzureMachineLearningVectorizerParameters;
257
+ }
258
+
259
+ /** Specifies the properties for connecting to an AML vectorizer. */
260
+ export declare type AzureMachineLearningVectorizerParameters = NoAuthAzureMachineLearningVectorizerParameters | KeyAuthAzureMachineLearningVectorizerParameters | TokenAuthAzureMachineLearningVectorizerParameters;
261
+
186
262
  /** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */
187
263
  export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill, AzureOpenAIParameters {
188
264
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -202,13 +278,13 @@ export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkil
202
278
  */
203
279
  export declare type AzureOpenAIModelName = string;
204
280
 
205
- /** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */
281
+ /** Specifies the parameters for connecting to the Azure OpenAI resource. */
206
282
  export declare interface AzureOpenAIParameters {
207
- /** The resource uri for your Azure Open AI resource. */
283
+ /** The resource URI of the Azure OpenAI resource. */
208
284
  resourceUrl?: string;
209
- /** ID of your Azure Open AI model deployment on the designated resource. */
285
+ /** ID of the Azure OpenAI model deployment on the designated resource. */
210
286
  deploymentId?: string;
211
- /** API key for the designated Azure Open AI resource. */
287
+ /** API key of the designated Azure OpenAI resource. */
212
288
  apiKey?: string;
213
289
  /** The user-assigned managed identity used for outbound connections. */
214
290
  authIdentity?: SearchIndexerDataIdentity;
@@ -216,6 +292,13 @@ export declare interface AzureOpenAIParameters {
216
292
  modelName?: AzureOpenAIModelName;
217
293
  }
218
294
 
295
+ export declare interface AzureOpenAITokenizerParameters {
296
+ /** Only applies if the unit is set to azureOpenAITokens. Options include 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. */
297
+ encoderModelName?: SplitSkillEncoderModelName;
298
+ /** (Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process. */
299
+ allowedSpecialTokens?: string[];
300
+ }
301
+
219
302
  /** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */
220
303
  export declare interface AzureOpenAIVectorizer extends BaseVectorSearchVectorizer {
221
304
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -224,6 +307,14 @@ export declare interface AzureOpenAIVectorizer extends BaseVectorSearchVectorize
224
307
  parameters?: AzureOpenAIParameters;
225
308
  }
226
309
 
310
+ /** Specifies the properties common between all AML vectorizer auth types. */
311
+ export declare interface BaseAzureMachineLearningVectorizerParameters {
312
+ /** When specified, indicates the timeout for the http client making the API call. */
313
+ timeout?: string;
314
+ /** The name of the embedding model from the Azure AI Studio Catalog that is deployed at the provided endpoint. */
315
+ modelName?: AIStudioModelCatalogName;
316
+ }
317
+
227
318
  /** Base type for character filters. */
228
319
  export declare interface BaseCharFilter {
229
320
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -249,7 +340,7 @@ export declare interface BaseDataChangeDetectionPolicy {
249
340
  /** Base type for data deletion detection policies. */
250
341
  export declare interface BaseDataDeletionDetectionPolicy {
251
342
  /** Polymorphic discriminator, which specifies the different types this object can be */
252
- odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
343
+ odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
253
344
  }
254
345
 
255
346
  /** Base type for analyzers. */
@@ -260,6 +351,14 @@ export declare interface BaseLexicalAnalyzer {
260
351
  name: string;
261
352
  }
262
353
 
354
+ /** Base type for normalizers. */
355
+ export declare interface BaseLexicalNormalizer {
356
+ /** Polymorphic discriminator, which specifies the different types this object can be */
357
+ odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
358
+ /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */
359
+ name: string;
360
+ }
361
+
263
362
  /** Base type for tokenizers. */
264
363
  export declare interface BaseLexicalTokenizer {
265
364
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -289,7 +388,7 @@ export declare interface BaseSearchIndexerDataIdentity {
289
388
  /** Base type for skills. */
290
389
  export declare interface BaseSearchIndexerSkill {
291
390
  /** Polymorphic discriminator, which specifies the different types this object can be */
292
- odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
391
+ odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Custom.AmlSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" | "#Microsoft.Skills.Vision.VectorizeSkill";
293
392
  /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */
294
393
  name?: string;
295
394
  /** The description of the skill which describes the inputs, outputs, and usage of the skill. */
@@ -374,6 +473,14 @@ export declare interface BaseSearchRequestOptions<TModel extends object, TFields
374
473
  * fielded search expression take precedence over any field names listed in this parameter.
375
474
  */
376
475
  searchFields?: SearchFieldArray<TModel>;
476
+ /**
477
+ * The language of the query.
478
+ */
479
+ queryLanguage?: QueryLanguage;
480
+ /**
481
+ * Improve search recall by spell-correcting individual search query terms.
482
+ */
483
+ speller?: Speller;
377
484
  /**
378
485
  * A value that specifies whether any or all of the search terms must be matched in order to
379
486
  * count the document as a match. Possible values include: 'any', 'all'
@@ -415,6 +522,8 @@ export declare interface BaseSearchRequestOptions<TModel extends object, TFields
415
522
  * Defines options for vector search queries
416
523
  */
417
524
  vectorSearchOptions?: VectorSearchOptions<TModel>;
525
+ /** The query parameters to configure hybrid search behaviors. */
526
+ hybridSearch?: HybridSearchOptions;
418
527
  }
419
528
 
420
529
  /** Base type for token filters. */
@@ -431,6 +540,8 @@ export declare interface BaseVectorQuery<TModel extends object> {
431
540
  * ### Known values supported by the service
432
541
  * **vector**: Vector query where a raw vector value is provided.
433
542
  * **text**: Vector query where a text value that needs to be vectorized is provided.
543
+ * **imageUrl**: Vector query where a URL that represents an image value that needs to be vectorized is provided.
544
+ * **imageBinary**: Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided.
434
545
  */
435
546
  kind: VectorQueryKind;
436
547
  /** Number of nearest neighbors to return as top hits. */
@@ -452,6 +563,11 @@ export declare interface BaseVectorQuery<TModel extends object> {
452
563
  oversampling?: number;
453
564
  /** Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */
454
565
  weight?: number;
566
+ /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. */
567
+ threshold?: VectorThreshold;
568
+ /** The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in
569
+ * the top level filter parameter is used instead. */
570
+ filterOverride?: string;
455
571
  }
456
572
 
457
573
  /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
@@ -472,6 +588,8 @@ export declare interface BaseVectorSearchCompression {
472
588
  rerankWithOriginalVectors?: boolean;
473
589
  /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */
474
590
  defaultOversampling?: number;
591
+ /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should be only used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */
592
+ truncationDimension?: number;
475
593
  }
476
594
 
477
595
  /** Contains specific details for a vectorization method to be used during query time. */
@@ -482,6 +600,12 @@ export declare interface BaseVectorSearchVectorizer {
482
600
  vectorizerName: string;
483
601
  }
484
602
 
603
+ /** The threshold used for vector queries. */
604
+ export declare interface BaseVectorThreshold {
605
+ /** Polymorphic discriminator, which specifies the different types this object can be */
606
+ kind: "vectorSimilarity" | "searchScore";
607
+ }
608
+
485
609
  /** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */
486
610
  export declare interface BinaryQuantizationCompression extends BaseVectorSearchCompression {
487
611
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -619,6 +743,11 @@ export declare interface CorsOptions {
619
743
  */
620
744
  export declare type CountDocumentsOptions = OperationOptions;
621
745
 
746
+ /**
747
+ * Options for create alias operation.
748
+ */
749
+ export declare type CreateAliasOptions = OperationOptions;
750
+
622
751
  /**
623
752
  * Options for create datasource operation.
624
753
  */
@@ -634,6 +763,16 @@ export declare type CreateIndexerOptions = OperationOptions;
634
763
  */
635
764
  export declare type CreateIndexOptions = OperationOptions;
636
765
 
766
+ /**
767
+ * Options for create or update alias operation.
768
+ */
769
+ export declare interface CreateOrUpdateAliasOptions extends OperationOptions {
770
+ /**
771
+ * If set to true, Resource will be deleted only if the etag matches.
772
+ */
773
+ onlyIfUnchanged?: boolean;
774
+ }
775
+
637
776
  /**
638
777
  * Options for create/update datasource operation.
639
778
  */
@@ -642,6 +781,10 @@ export declare interface CreateorUpdateDataSourceConnectionOptions extends Opera
642
781
  * If set to true, Resource will be updated only if the etag matches.
643
782
  */
644
783
  onlyIfUnchanged?: boolean;
784
+ /**
785
+ * Ignores cache reset requirements.
786
+ */
787
+ skipIndexerResetRequirementForCache?: boolean;
645
788
  }
646
789
 
647
790
  /**
@@ -652,6 +795,10 @@ export declare interface CreateorUpdateIndexerOptions extends OperationOptions {
652
795
  * If set to true, Resource will be updated only if the etag matches.
653
796
  */
654
797
  onlyIfUnchanged?: boolean;
798
+ /** Ignores cache reset requirements. */
799
+ skipIndexerResetRequirementForCache?: boolean;
800
+ /** Disables cache reprocessing change detection. */
801
+ disableCacheReprocessingChangeDetection?: boolean;
655
802
  }
656
803
 
657
804
  /**
@@ -679,6 +826,14 @@ export declare interface CreateOrUpdateSkillsetOptions extends OperationOptions
679
826
  * If set to true, Resource will be updated only if the etag matches.
680
827
  */
681
828
  onlyIfUnchanged?: boolean;
829
+ /**
830
+ * Ignores cache reset requirements.
831
+ */
832
+ skipIndexerResetRequirementForCache?: boolean;
833
+ /**
834
+ * Disables cache reprocessing change detection.
835
+ */
836
+ disableCacheReprocessingChangeDetection?: boolean;
682
837
  }
683
838
 
684
839
  /**
@@ -805,6 +960,16 @@ export declare interface CustomEntityLookupSkill extends BaseSearchIndexerSkill
805
960
 
806
961
  export declare type CustomEntityLookupSkillLanguage = `${KnownCustomEntityLookupSkillLanguage}`;
807
962
 
963
+ /** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */
964
+ export declare interface CustomNormalizer extends BaseLexicalNormalizer {
965
+ /** Polymorphic discriminator, which specifies the different types this object can be */
966
+ odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
967
+ /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
968
+ tokenFilters?: TokenFilterName[];
969
+ /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
970
+ charFilters?: CharFilterName[];
971
+ }
972
+
808
973
  /**
809
974
  * Contains the possible cases for DataChangeDetectionPolicy.
810
975
  */
@@ -813,7 +978,7 @@ export declare type DataChangeDetectionPolicy = HighWaterMarkChangeDetectionPoli
813
978
  /**
814
979
  * Contains the possible cases for DataDeletionDetectionPolicy.
815
980
  */
816
- export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;
981
+ export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy | NativeBlobSoftDeleteDeletionDetectionPolicy;
817
982
 
818
983
  /**
819
984
  * Default Batch Size
@@ -836,6 +1001,16 @@ export declare interface DefaultCognitiveServicesAccount extends BaseCognitiveSe
836
1001
  odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
837
1002
  }
838
1003
 
1004
+ /**
1005
+ * Options for delete alias operation.
1006
+ */
1007
+ export declare interface DeleteAliasOptions extends OperationOptions {
1008
+ /**
1009
+ * If set to true, Resource will be deleted only if the etag matches.
1010
+ */
1011
+ onlyIfUnchanged?: boolean;
1012
+ }
1013
+
839
1014
  /**
840
1015
  * Options for delete datasource operation.
841
1016
  */
@@ -923,6 +1098,20 @@ export declare interface DistanceScoringParameters {
923
1098
  boostingDistance: number;
924
1099
  }
925
1100
 
1101
+ /** Contains debugging information that can be used to further explore your search results. */
1102
+ export declare interface DocumentDebugInfo {
1103
+ /**
1104
+ * Contains debugging information specific to semantic search queries.
1105
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1106
+ */
1107
+ readonly semantic?: SemanticDebugInfo;
1108
+ /**
1109
+ * Contains debugging information specific to vector and hybrid search.
1110
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1111
+ */
1112
+ readonly vectors?: VectorsDebugInfo;
1113
+ }
1114
+
926
1115
  /** A skill that extracts content from a file within the enrichment pipeline. */
927
1116
  export declare interface DocumentExtractionSkill extends BaseSearchIndexerSkill {
928
1117
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1153,6 +1342,11 @@ export declare class GeographyPoint {
1153
1342
  toJSON(): Record<string, unknown>;
1154
1343
  }
1155
1344
 
1345
+ /**
1346
+ * Options for get alias operation.
1347
+ */
1348
+ export declare type GetAliasOptions = OperationOptions;
1349
+
1156
1350
  /**
1157
1351
  * Options for get datasource operation.
1158
1352
  */
@@ -1257,6 +1451,24 @@ export declare interface HnswParameters {
1257
1451
  metric?: VectorSearchAlgorithmMetric;
1258
1452
  }
1259
1453
 
1454
+ /**
1455
+ * Defines values for HybridCountAndFacetMode. \
1456
+ * {@link KnownHybridCountAndFacetMode} can be used interchangeably with HybridCountAndFacetMode,
1457
+ * this enum contains the known values that the service supports.
1458
+ * ### Known values supported by the service
1459
+ * **countRetrievableResults**: Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. \
1460
+ * **countAllResults**: Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window.
1461
+ */
1462
+ export declare type HybridCountAndFacetMode = string;
1463
+
1464
+ /** The query parameters to configure hybrid search behaviors. */
1465
+ export declare interface HybridSearchOptions {
1466
+ /** Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of results. Choosing a larger maxTextRecallSize value will allow retrieving and paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000. */
1467
+ maxTextRecallSize?: number;
1468
+ /** Determines whether the count and facets should include all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. */
1469
+ countAndFacetMode?: HybridCountAndFacetMode;
1470
+ }
1471
+
1260
1472
  /** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
1261
1473
  export declare interface ImageAnalysisSkill extends BaseSearchIndexerSkill {
1262
1474
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1382,6 +1594,16 @@ export declare interface IndexerExecutionResult {
1382
1594
  * NOTE: This property will not be serialized. It can only be populated by the server.
1383
1595
  */
1384
1596
  readonly status: IndexerExecutionStatus;
1597
+ /**
1598
+ * The outcome of this indexer execution.
1599
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1600
+ */
1601
+ readonly statusDetail?: IndexerExecutionStatusDetail;
1602
+ /**
1603
+ * All of the state that defines and dictates the indexer's current execution.
1604
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1605
+ */
1606
+ readonly currentState?: IndexerState;
1385
1607
  /**
1386
1608
  * The error message indicating the top-level error, if any.
1387
1609
  * NOTE: This property will not be serialized. It can only be populated by the server.
@@ -1432,9 +1654,67 @@ export declare interface IndexerExecutionResult {
1432
1654
  /** Defines values for IndexerExecutionStatus. */
1433
1655
  export declare type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset";
1434
1656
 
1657
+ /**
1658
+ * Defines values for IndexerExecutionStatusDetail. \
1659
+ * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail,
1660
+ * this enum contains the known values that the service supports.
1661
+ * ### Known values supported by the service
1662
+ * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs.
1663
+ */
1664
+ export declare type IndexerExecutionStatusDetail = string;
1665
+
1666
+ /** Represents all of the state that defines and dictates the indexer's current execution. */
1667
+ export declare interface IndexerState {
1668
+ /**
1669
+ * The mode the indexer is running in.
1670
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1671
+ */
1672
+ readonly mode?: IndexingMode;
1673
+ /**
1674
+ * Change tracking state used when indexing starts on all documents in the datasource.
1675
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1676
+ */
1677
+ readonly allDocumentsInitialChangeTrackingState?: string;
1678
+ /**
1679
+ * Change tracking state value when indexing finishes on all documents in the datasource.
1680
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1681
+ */
1682
+ readonly allDocumentsFinalChangeTrackingState?: string;
1683
+ /**
1684
+ * Change tracking state used when indexing starts on select, reset documents in the datasource.
1685
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1686
+ */
1687
+ readonly resetDocumentsInitialChangeTrackingState?: string;
1688
+ /**
1689
+ * Change tracking state value when indexing finishes on select, reset documents in the datasource.
1690
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1691
+ */
1692
+ readonly resetDocumentsFinalChangeTrackingState?: string;
1693
+ /**
1694
+ * The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys.
1695
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1696
+ */
1697
+ readonly resetDocumentKeys?: string[];
1698
+ /**
1699
+ * The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids.
1700
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1701
+ */
1702
+ readonly resetDatasourceDocumentIds?: string[];
1703
+ }
1704
+
1435
1705
  /** Defines values for IndexerStatus. */
1436
1706
  export declare type IndexerStatus = "unknown" | "error" | "running";
1437
1707
 
1708
+ /**
1709
+ * Defines values for IndexingMode. \
1710
+ * {@link KnownIndexingMode} can be used interchangeably with IndexingMode,
1711
+ * this enum contains the known values that the service supports.
1712
+ * ### Known values supported by the service
1713
+ * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \
1714
+ * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status.
1715
+ */
1716
+ export declare type IndexingMode = string;
1717
+
1438
1718
  /** Represents parameters for indexer execution. */
1439
1719
  export declare interface IndexingParameters {
1440
1720
  /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */
@@ -1563,6 +1843,18 @@ export declare interface KeepTokenFilter extends BaseTokenFilter {
1563
1843
  lowerCaseKeepWords?: boolean;
1564
1844
  }
1565
1845
 
1846
+ /**
1847
+ * Specifies the properties for connecting to an AML vectorizer with an authentication key.
1848
+ */
1849
+ export declare interface KeyAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
1850
+ /** Indicates how the service should attempt to identify itself to the AML instance */
1851
+ authKind: "key";
1852
+ /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
1853
+ scoringUri: string;
1854
+ /** The key for the AML service. */
1855
+ authenticationKey: string;
1856
+ }
1857
+
1566
1858
  /** A skill that uses text analytics for key phrase extraction. */
1567
1859
  export declare interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill {
1568
1860
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1608,6 +1900,22 @@ export declare interface KeywordTokenizer {
1608
1900
  maxTokenLength?: number;
1609
1901
  }
1610
1902
 
1903
+ /** Known values of {@link AIStudioModelCatalogName} that the service accepts. */
1904
+ export declare enum KnownAIStudioModelCatalogName {
1905
+ /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */
1906
+ OpenAIClipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32",
1907
+ /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */
1908
+ OpenAIClipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336",
1909
+ /** FacebookDinoV2ImageEmbeddingsViTBase */
1910
+ FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base",
1911
+ /** FacebookDinoV2ImageEmbeddingsViTGiant */
1912
+ FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant",
1913
+ /** CohereEmbedV3English */
1914
+ CohereEmbedV3English = "Cohere-embed-v3-english",
1915
+ /** CohereEmbedV3Multilingual */
1916
+ CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual"
1917
+ }
1918
+
1611
1919
  /**
1612
1920
  * Defines values for AnalyzerName.
1613
1921
  * See https://docs.microsoft.com/rest/api/searchservice/Language-support
@@ -2041,9 +2349,15 @@ export declare enum KnownBlobIndexerPDFTextRotationAlgorithm {
2041
2349
  DetectAngles = "detectAngles"
2042
2350
  }
2043
2351
 
2044
- /** Known values of {@link CharFilterName} that the service accepts. */
2352
+ /**
2353
+ * Defines values for CharFilterName.
2354
+ * @readonly
2355
+ */
2045
2356
  export declare enum KnownCharFilterNames {
2046
- /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
2357
+ /**
2358
+ * A character filter that attempts to strip out HTML constructs. See
2359
+ * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html
2360
+ */
2047
2361
  HtmlStrip = "html_strip"
2048
2362
  }
2049
2363
 
@@ -2137,6 +2451,14 @@ export declare enum KnownEntityRecognitionSkillLanguage {
2137
2451
  Tr = "tr"
2138
2452
  }
2139
2453
 
2454
+ /** Known values of {@link HybridCountAndFacetMode} that the service accepts. */
2455
+ export declare enum KnownHybridCountAndFacetMode {
2456
+ /** Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. */
2457
+ CountRetrievableResults = "countRetrievableResults",
2458
+ /** Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. */
2459
+ CountAllResults = "countAllResults"
2460
+ }
2461
+
2140
2462
  /** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. */
2141
2463
  export declare enum KnownImageAnalysisSkillLanguage {
2142
2464
  /** Arabic */
@@ -2261,6 +2583,20 @@ export declare enum KnownIndexerExecutionEnvironment {
2261
2583
  Private = "private"
2262
2584
  }
2263
2585
 
2586
+ /** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
2587
+ export declare enum KnownIndexerExecutionStatusDetail {
2588
+ /** Indicates that the reset that occurred was for a call to ResetDocs. */
2589
+ ResetDocs = "resetDocs"
2590
+ }
2591
+
2592
+ /** Known values of {@link IndexingMode} that the service accepts. */
2593
+ export declare enum KnownIndexingMode {
2594
+ /** The indexer is indexing all documents in the datasource. */
2595
+ IndexingAllDocs = "indexingAllDocs",
2596
+ /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
2597
+ IndexingResetDocs = "indexingResetDocs"
2598
+ }
2599
+
2264
2600
  /** Known values of {@link IndexProjectionMode} that the service accepts. */
2265
2601
  export declare enum KnownIndexProjectionMode {
2266
2602
  /** The source document will be skipped from writing into the indexer's target index. */
@@ -2305,6 +2641,212 @@ export declare enum KnownKeyPhraseExtractionSkillLanguage {
2305
2641
  Sv = "sv"
2306
2642
  }
2307
2643
 
2644
+ /** Known values of {@link LexicalAnalyzerName} that the service accepts. */
2645
+ export declare enum KnownLexicalAnalyzerName {
2646
+ /** Microsoft analyzer for Arabic. */
2647
+ ArMicrosoft = "ar.microsoft",
2648
+ /** Lucene analyzer for Arabic. */
2649
+ ArLucene = "ar.lucene",
2650
+ /** Lucene analyzer for Armenian. */
2651
+ HyLucene = "hy.lucene",
2652
+ /** Microsoft analyzer for Bangla. */
2653
+ BnMicrosoft = "bn.microsoft",
2654
+ /** Lucene analyzer for Basque. */
2655
+ EuLucene = "eu.lucene",
2656
+ /** Microsoft analyzer for Bulgarian. */
2657
+ BgMicrosoft = "bg.microsoft",
2658
+ /** Lucene analyzer for Bulgarian. */
2659
+ BgLucene = "bg.lucene",
2660
+ /** Microsoft analyzer for Catalan. */
2661
+ CaMicrosoft = "ca.microsoft",
2662
+ /** Lucene analyzer for Catalan. */
2663
+ CaLucene = "ca.lucene",
2664
+ /** Microsoft analyzer for Chinese (Simplified). */
2665
+ ZhHansMicrosoft = "zh-Hans.microsoft",
2666
+ /** Lucene analyzer for Chinese (Simplified). */
2667
+ ZhHansLucene = "zh-Hans.lucene",
2668
+ /** Microsoft analyzer for Chinese (Traditional). */
2669
+ ZhHantMicrosoft = "zh-Hant.microsoft",
2670
+ /** Lucene analyzer for Chinese (Traditional). */
2671
+ ZhHantLucene = "zh-Hant.lucene",
2672
+ /** Microsoft analyzer for Croatian. */
2673
+ HrMicrosoft = "hr.microsoft",
2674
+ /** Microsoft analyzer for Czech. */
2675
+ CsMicrosoft = "cs.microsoft",
2676
+ /** Lucene analyzer for Czech. */
2677
+ CsLucene = "cs.lucene",
2678
+ /** Microsoft analyzer for Danish. */
2679
+ DaMicrosoft = "da.microsoft",
2680
+ /** Lucene analyzer for Danish. */
2681
+ DaLucene = "da.lucene",
2682
+ /** Microsoft analyzer for Dutch. */
2683
+ NlMicrosoft = "nl.microsoft",
2684
+ /** Lucene analyzer for Dutch. */
2685
+ NlLucene = "nl.lucene",
2686
+ /** Microsoft analyzer for English. */
2687
+ EnMicrosoft = "en.microsoft",
2688
+ /** Lucene analyzer for English. */
2689
+ EnLucene = "en.lucene",
2690
+ /** Microsoft analyzer for Estonian. */
2691
+ EtMicrosoft = "et.microsoft",
2692
+ /** Microsoft analyzer for Finnish. */
2693
+ FiMicrosoft = "fi.microsoft",
2694
+ /** Lucene analyzer for Finnish. */
2695
+ FiLucene = "fi.lucene",
2696
+ /** Microsoft analyzer for French. */
2697
+ FrMicrosoft = "fr.microsoft",
2698
+ /** Lucene analyzer for French. */
2699
+ FrLucene = "fr.lucene",
2700
+ /** Lucene analyzer for Galician. */
2701
+ GlLucene = "gl.lucene",
2702
+ /** Microsoft analyzer for German. */
2703
+ DeMicrosoft = "de.microsoft",
2704
+ /** Lucene analyzer for German. */
2705
+ DeLucene = "de.lucene",
2706
+ /** Microsoft analyzer for Greek. */
2707
+ ElMicrosoft = "el.microsoft",
2708
+ /** Lucene analyzer for Greek. */
2709
+ ElLucene = "el.lucene",
2710
+ /** Microsoft analyzer for Gujarati. */
2711
+ GuMicrosoft = "gu.microsoft",
2712
+ /** Microsoft analyzer for Hebrew. */
2713
+ HeMicrosoft = "he.microsoft",
2714
+ /** Microsoft analyzer for Hindi. */
2715
+ HiMicrosoft = "hi.microsoft",
2716
+ /** Lucene analyzer for Hindi. */
2717
+ HiLucene = "hi.lucene",
2718
+ /** Microsoft analyzer for Hungarian. */
2719
+ HuMicrosoft = "hu.microsoft",
2720
+ /** Lucene analyzer for Hungarian. */
2721
+ HuLucene = "hu.lucene",
2722
+ /** Microsoft analyzer for Icelandic. */
2723
+ IsMicrosoft = "is.microsoft",
2724
+ /** Microsoft analyzer for Indonesian (Bahasa). */
2725
+ IdMicrosoft = "id.microsoft",
2726
+ /** Lucene analyzer for Indonesian. */
2727
+ IdLucene = "id.lucene",
2728
+ /** Lucene analyzer for Irish. */
2729
+ GaLucene = "ga.lucene",
2730
+ /** Microsoft analyzer for Italian. */
2731
+ ItMicrosoft = "it.microsoft",
2732
+ /** Lucene analyzer for Italian. */
2733
+ ItLucene = "it.lucene",
2734
+ /** Microsoft analyzer for Japanese. */
2735
+ JaMicrosoft = "ja.microsoft",
2736
+ /** Lucene analyzer for Japanese. */
2737
+ JaLucene = "ja.lucene",
2738
+ /** Microsoft analyzer for Kannada. */
2739
+ KnMicrosoft = "kn.microsoft",
2740
+ /** Microsoft analyzer for Korean. */
2741
+ KoMicrosoft = "ko.microsoft",
2742
+ /** Lucene analyzer for Korean. */
2743
+ KoLucene = "ko.lucene",
2744
+ /** Microsoft analyzer for Latvian. */
2745
+ LvMicrosoft = "lv.microsoft",
2746
+ /** Lucene analyzer for Latvian. */
2747
+ LvLucene = "lv.lucene",
2748
+ /** Microsoft analyzer for Lithuanian. */
2749
+ LtMicrosoft = "lt.microsoft",
2750
+ /** Microsoft analyzer for Malayalam. */
2751
+ MlMicrosoft = "ml.microsoft",
2752
+ /** Microsoft analyzer for Malay (Latin). */
2753
+ MsMicrosoft = "ms.microsoft",
2754
+ /** Microsoft analyzer for Marathi. */
2755
+ MrMicrosoft = "mr.microsoft",
2756
+ /** Microsoft analyzer for Norwegian (Bokmål). */
2757
+ NbMicrosoft = "nb.microsoft",
2758
+ /** Lucene analyzer for Norwegian. */
2759
+ NoLucene = "no.lucene",
2760
+ /** Lucene analyzer for Persian. */
2761
+ FaLucene = "fa.lucene",
2762
+ /** Microsoft analyzer for Polish. */
2763
+ PlMicrosoft = "pl.microsoft",
2764
+ /** Lucene analyzer for Polish. */
2765
+ PlLucene = "pl.lucene",
2766
+ /** Microsoft analyzer for Portuguese (Brazil). */
2767
+ PtBrMicrosoft = "pt-BR.microsoft",
2768
+ /** Lucene analyzer for Portuguese (Brazil). */
2769
+ PtBrLucene = "pt-BR.lucene",
2770
+ /** Microsoft analyzer for Portuguese (Portugal). */
2771
+ PtPtMicrosoft = "pt-PT.microsoft",
2772
+ /** Lucene analyzer for Portuguese (Portugal). */
2773
+ PtPtLucene = "pt-PT.lucene",
2774
+ /** Microsoft analyzer for Punjabi. */
2775
+ PaMicrosoft = "pa.microsoft",
2776
+ /** Microsoft analyzer for Romanian. */
2777
+ RoMicrosoft = "ro.microsoft",
2778
+ /** Lucene analyzer for Romanian. */
2779
+ RoLucene = "ro.lucene",
2780
+ /** Microsoft analyzer for Russian. */
2781
+ RuMicrosoft = "ru.microsoft",
2782
+ /** Lucene analyzer for Russian. */
2783
+ RuLucene = "ru.lucene",
2784
+ /** Microsoft analyzer for Serbian (Cyrillic). */
2785
+ SrCyrillicMicrosoft = "sr-cyrillic.microsoft",
2786
+ /** Microsoft analyzer for Serbian (Latin). */
2787
+ SrLatinMicrosoft = "sr-latin.microsoft",
2788
+ /** Microsoft analyzer for Slovak. */
2789
+ SkMicrosoft = "sk.microsoft",
2790
+ /** Microsoft analyzer for Slovenian. */
2791
+ SlMicrosoft = "sl.microsoft",
2792
+ /** Microsoft analyzer for Spanish. */
2793
+ EsMicrosoft = "es.microsoft",
2794
+ /** Lucene analyzer for Spanish. */
2795
+ EsLucene = "es.lucene",
2796
+ /** Microsoft analyzer for Swedish. */
2797
+ SvMicrosoft = "sv.microsoft",
2798
+ /** Lucene analyzer for Swedish. */
2799
+ SvLucene = "sv.lucene",
2800
+ /** Microsoft analyzer for Tamil. */
2801
+ TaMicrosoft = "ta.microsoft",
2802
+ /** Microsoft analyzer for Telugu. */
2803
+ TeMicrosoft = "te.microsoft",
2804
+ /** Microsoft analyzer for Thai. */
2805
+ ThMicrosoft = "th.microsoft",
2806
+ /** Lucene analyzer for Thai. */
2807
+ ThLucene = "th.lucene",
2808
+ /** Microsoft analyzer for Turkish. */
2809
+ TrMicrosoft = "tr.microsoft",
2810
+ /** Lucene analyzer for Turkish. */
2811
+ TrLucene = "tr.lucene",
2812
+ /** Microsoft analyzer for Ukrainian. */
2813
+ UkMicrosoft = "uk.microsoft",
2814
+ /** Microsoft analyzer for Urdu. */
2815
+ UrMicrosoft = "ur.microsoft",
2816
+ /** Microsoft analyzer for Vietnamese. */
2817
+ ViMicrosoft = "vi.microsoft",
2818
+ /** Standard Lucene analyzer. */
2819
+ StandardLucene = "standard.lucene",
2820
+ /** Standard ASCII Folding Lucene analyzer. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
2821
+ StandardAsciiFoldingLucene = "standardasciifolding.lucene",
2822
+ /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
2823
+ Keyword = "keyword",
2824
+ /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
2825
+ Pattern = "pattern",
2826
+ /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
2827
+ Simple = "simple",
2828
+ /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
2829
+ Stop = "stop",
2830
+ /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
2831
+ Whitespace = "whitespace"
2832
+ }
2833
+
2834
+ /** Known values of {@link LexicalNormalizerName} that the service accepts. */
2835
+ declare enum KnownLexicalNormalizerName {
2836
+ /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
2837
+ AsciiFolding = "asciifolding",
2838
+ /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
2839
+ Elision = "elision",
2840
+ /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
2841
+ Lowercase = "lowercase",
2842
+ /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
2843
+ Standard = "standard",
2844
+ /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
2845
+ Uppercase = "uppercase"
2846
+ }
2847
+ export { KnownLexicalNormalizerName }
2848
+ export { KnownLexicalNormalizerName as KnownNormalizerNames }
2849
+
2308
2850
  /** Known values of {@link OcrLineEnding} that the service accepts. */
2309
2851
  export declare enum KnownOcrLineEnding {
2310
2852
  /** Lines are separated by a single space character. */
@@ -2669,6 +3211,170 @@ export declare enum KnownPIIDetectionSkillMaskingMode {
2669
3211
  Replace = "replace"
2670
3212
  }
2671
3213
 
3214
+ /** Known values of {@link QueryDebugMode} that the service accepts. */
3215
+ export declare enum KnownQueryDebugMode {
3216
+ /** No query debugging information will be returned. */
3217
+ Disabled = "disabled",
3218
+ /** Allows the user to further explore their reranked results. */
3219
+ Semantic = "semantic"
3220
+ }
3221
+
3222
+ /** Known values of {@link QueryLanguage} that the service accepts. */
3223
+ export declare enum KnownQueryLanguage {
3224
+ /** Query language not specified. */
3225
+ None = "none",
3226
+ /** Query language value for English (United States). */
3227
+ EnUs = "en-us",
3228
+ /** Query language value for English (Great Britain). */
3229
+ EnGb = "en-gb",
3230
+ /** Query language value for English (India). */
3231
+ EnIn = "en-in",
3232
+ /** Query language value for English (Canada). */
3233
+ EnCa = "en-ca",
3234
+ /** Query language value for English (Australia). */
3235
+ EnAu = "en-au",
3236
+ /** Query language value for French (France). */
3237
+ FrFr = "fr-fr",
3238
+ /** Query language value for French (Canada). */
3239
+ FrCa = "fr-ca",
3240
+ /** Query language value for German (Germany). */
3241
+ DeDe = "de-de",
3242
+ /** Query language value for Spanish (Spain). */
3243
+ EsEs = "es-es",
3244
+ /** Query language value for Spanish (Mexico). */
3245
+ EsMx = "es-mx",
3246
+ /** Query language value for Chinese (China). */
3247
+ ZhCn = "zh-cn",
3248
+ /** Query language value for Chinese (Taiwan). */
3249
+ ZhTw = "zh-tw",
3250
+ /** Query language value for Portuguese (Brazil). */
3251
+ PtBr = "pt-br",
3252
+ /** Query language value for Portuguese (Portugal). */
3253
+ PtPt = "pt-pt",
3254
+ /** Query language value for Italian (Italy). */
3255
+ ItIt = "it-it",
3256
+ /** Query language value for Japanese (Japan). */
3257
+ JaJp = "ja-jp",
3258
+ /** Query language value for Korean (Korea). */
3259
+ KoKr = "ko-kr",
3260
+ /** Query language value for Russian (Russia). */
3261
+ RuRu = "ru-ru",
3262
+ /** Query language value for Czech (Czech Republic). */
3263
+ CsCz = "cs-cz",
3264
+ /** Query language value for Dutch (Belgium). */
3265
+ NlBe = "nl-be",
3266
+ /** Query language value for Dutch (Netherlands). */
3267
+ NlNl = "nl-nl",
3268
+ /** Query language value for Hungarian (Hungary). */
3269
+ HuHu = "hu-hu",
3270
+ /** Query language value for Polish (Poland). */
3271
+ PlPl = "pl-pl",
3272
+ /** Query language value for Swedish (Sweden). */
3273
+ SvSe = "sv-se",
3274
+ /** Query language value for Turkish (Turkey). */
3275
+ TrTr = "tr-tr",
3276
+ /** Query language value for Hindi (India). */
3277
+ HiIn = "hi-in",
3278
+ /** Query language value for Arabic (Saudi Arabia). */
3279
+ ArSa = "ar-sa",
3280
+ /** Query language value for Arabic (Egypt). */
3281
+ ArEg = "ar-eg",
3282
+ /** Query language value for Arabic (Morocco). */
3283
+ ArMa = "ar-ma",
3284
+ /** Query language value for Arabic (Kuwait). */
3285
+ ArKw = "ar-kw",
3286
+ /** Query language value for Arabic (Jordan). */
3287
+ ArJo = "ar-jo",
3288
+ /** Query language value for Danish (Denmark). */
3289
+ DaDk = "da-dk",
3290
+ /** Query language value for Norwegian (Norway). */
3291
+ NoNo = "no-no",
3292
+ /** Query language value for Bulgarian (Bulgaria). */
3293
+ BgBg = "bg-bg",
3294
+ /** Query language value for Croatian (Croatia). */
3295
+ HrHr = "hr-hr",
3296
+ /** Query language value for Croatian (Bosnia and Herzegovina). */
3297
+ HrBa = "hr-ba",
3298
+ /** Query language value for Malay (Malaysia). */
3299
+ MsMy = "ms-my",
3300
+ /** Query language value for Malay (Brunei Darussalam). */
3301
+ MsBn = "ms-bn",
3302
+ /** Query language value for Slovenian (Slovenia). */
3303
+ SlSl = "sl-sl",
3304
+ /** Query language value for Tamil (India). */
3305
+ TaIn = "ta-in",
3306
+ /** Query language value for Vietnamese (Viet Nam). */
3307
+ ViVn = "vi-vn",
3308
+ /** Query language value for Greek (Greece). */
3309
+ ElGr = "el-gr",
3310
+ /** Query language value for Romanian (Romania). */
3311
+ RoRo = "ro-ro",
3312
+ /** Query language value for Icelandic (Iceland). */
3313
+ IsIs = "is-is",
3314
+ /** Query language value for Indonesian (Indonesia). */
3315
+ IdId = "id-id",
3316
+ /** Query language value for Thai (Thailand). */
3317
+ ThTh = "th-th",
3318
+ /** Query language value for Lithuanian (Lithuania). */
3319
+ LtLt = "lt-lt",
3320
+ /** Query language value for Ukrainian (Ukraine). */
3321
+ UkUa = "uk-ua",
3322
+ /** Query language value for Latvian (Latvia). */
3323
+ LvLv = "lv-lv",
3324
+ /** Query language value for Estonian (Estonia). */
3325
+ EtEe = "et-ee",
3326
+ /** Query language value for Catalan. */
3327
+ CaEs = "ca-es",
3328
+ /** Query language value for Finnish (Finland). */
3329
+ FiFi = "fi-fi",
3330
+ /** Query language value for Serbian (Bosnia and Herzegovina). */
3331
+ SrBa = "sr-ba",
3332
+ /** Query language value for Serbian (Montenegro). */
3333
+ SrMe = "sr-me",
3334
+ /** Query language value for Serbian (Serbia). */
3335
+ SrRs = "sr-rs",
3336
+ /** Query language value for Slovak (Slovakia). */
3337
+ SkSk = "sk-sk",
3338
+ /** Query language value for Norwegian (Norway). */
3339
+ NbNo = "nb-no",
3340
+ /** Query language value for Armenian (Armenia). */
3341
+ HyAm = "hy-am",
3342
+ /** Query language value for Bengali (India). */
3343
+ BnIn = "bn-in",
3344
+ /** Query language value for Basque. */
3345
+ EuEs = "eu-es",
3346
+ /** Query language value for Galician. */
3347
+ GlEs = "gl-es",
3348
+ /** Query language value for Gujarati (India). */
3349
+ GuIn = "gu-in",
3350
+ /** Query language value for Hebrew (Israel). */
3351
+ HeIl = "he-il",
3352
+ /** Query language value for Irish (Ireland). */
3353
+ GaIe = "ga-ie",
3354
+ /** Query language value for Kannada (India). */
3355
+ KnIn = "kn-in",
3356
+ /** Query language value for Malayalam (India). */
3357
+ MlIn = "ml-in",
3358
+ /** Query language value for Marathi (India). */
3359
+ MrIn = "mr-in",
3360
+ /** Query language value for Persian (U.A.E.). */
3361
+ FaAe = "fa-ae",
3362
+ /** Query language value for Punjabi (India). */
3363
+ PaIn = "pa-in",
3364
+ /** Query language value for Telugu (India). */
3365
+ TeIn = "te-in",
3366
+ /** Query language value for Urdu (Pakistan). */
3367
+ UrPk = "ur-pk"
3368
+ }
3369
+
3370
+ /** Known values of {@link QuerySpellerType} that the service accepts. */
3371
+ export declare enum KnownQuerySpellerType {
3372
+ /** Speller not enabled. */
3373
+ None = "none",
3374
+ /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
3375
+ Lexicon = "lexicon"
3376
+ }
3377
+
2672
3378
  /** Known values of {@link RegexFlags} that the service accepts. */
2673
3379
  export declare enum KnownRegexFlags {
2674
3380
  /** Enables canonical equivalence. */
@@ -2750,7 +3456,9 @@ export declare enum KnownSearchIndexerDataSourceType {
2750
3456
  /** Indicates a MySql datasource. */
2751
3457
  MySql = "mysql",
2752
3458
  /** Indicates an ADLS Gen2 datasource. */
2753
- AdlsGen2 = "adlsgen2"
3459
+ AdlsGen2 = "adlsgen2",
3460
+ /** Indicates a Microsoft Fabric OneLake datasource. */
3461
+ OneLake = "onelake"
2754
3462
  }
2755
3463
 
2756
3464
  /** Known values of {@link SemanticErrorMode} that the service accepts. */
@@ -2771,6 +3479,16 @@ export declare enum KnownSemanticErrorReason {
2771
3479
  Transient = "transient"
2772
3480
  }
2773
3481
 
3482
+ /** Known values of {@link SemanticFieldState} that the service accepts. */
3483
+ export declare enum KnownSemanticFieldState {
3484
+ /** The field was fully used for semantic enrichment. */
3485
+ Used = "used",
3486
+ /** The field was not used for semantic enrichment. */
3487
+ Unused = "unused",
3488
+ /** The field was partially used for semantic enrichment. */
3489
+ Partial = "partial"
3490
+ }
3491
+
2774
3492
  /** Known values of {@link SemanticSearchResultsType} that the service accepts. */
2775
3493
  export declare enum KnownSemanticSearchResultsType {
2776
3494
  /** Results without any semantic enrichment or reranking. */
@@ -2813,6 +3531,26 @@ export declare enum KnownSentimentSkillLanguage {
2813
3531
  Tr = "tr"
2814
3532
  }
2815
3533
 
3534
+ /** Known values of {@link Speller} that the service accepts. */
3535
+ export declare enum KnownSpeller {
3536
+ /** Speller not enabled. */
3537
+ None = "none",
3538
+ /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
3539
+ Lexicon = "lexicon"
3540
+ }
3541
+
3542
+ /** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */
3543
+ export declare enum KnownSplitSkillEncoderModelName {
3544
+ /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
3545
+ R50KBase = "r50k_base",
3546
+ /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
3547
+ P50KBase = "p50k_base",
3548
+ /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
3549
+ P50KEdit = "p50k_edit",
3550
+ /** A base model with a 100,000 token vocabulary. */
3551
+ CL100KBase = "cl100k_base"
3552
+ }
3553
+
2816
3554
  /** Known values of {@link SplitSkillLanguage} that the service accepts. */
2817
3555
  export declare enum KnownSplitSkillLanguage {
2818
3556
  /** Amharic */
@@ -2883,6 +3621,14 @@ export declare enum KnownSplitSkillLanguage {
2883
3621
  Zh = "zh"
2884
3622
  }
2885
3623
 
3624
+ /** Known values of {@link SplitSkillUnit} that the service accepts. */
3625
+ export declare enum KnownSplitSkillUnit {
3626
+ /** The length will be measured by character. */
3627
+ Characters = "characters",
3628
+ /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */
3629
+ AzureOpenAITokens = "azureOpenAITokens"
3630
+ }
3631
+
2886
3632
  /** Known values of {@link TextSplitMode} that the service accepts. */
2887
3633
  export declare enum KnownTextSplitMode {
2888
3634
  /** Split the text into individual pages. */
@@ -3039,105 +3785,257 @@ export declare enum KnownTextTranslationSkillLanguage {
3039
3785
  Pa = "pa"
3040
3786
  }
3041
3787
 
3042
- /** Known values of {@link TokenFilterName} that the service accepts. */
3788
+ /**
3789
+ * Defines values for TokenFilterName.
3790
+ * @readonly
3791
+ */
3043
3792
  export declare enum KnownTokenFilterNames {
3044
- /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
3793
+ /**
3794
+ * A token filter that applies the Arabic normalizer to normalize the orthography. See
3795
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html
3796
+ */
3045
3797
  ArabicNormalization = "arabic_normalization",
3046
- /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
3798
+ /**
3799
+ * Strips all characters after an apostrophe (including the apostrophe itself). See
3800
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html
3801
+ */
3047
3802
  Apostrophe = "apostrophe",
3048
- /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
3803
+ /**
3804
+ * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127
3805
+ * ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such
3806
+ * equivalents exist. See
3807
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html
3808
+ */
3049
3809
  AsciiFolding = "asciifolding",
3050
- /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
3810
+ /**
3811
+ * Forms bigrams of CJK terms that are generated from StandardTokenizer. See
3812
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html
3813
+ */
3051
3814
  CjkBigram = "cjk_bigram",
3052
- /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
3815
+ /**
3816
+ * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic
3817
+ * Latin, and half-width Katakana variants into the equivalent Kana. See
3818
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html
3819
+ */
3053
3820
  CjkWidth = "cjk_width",
3054
- /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
3821
+ /**
3822
+ * Removes English possessives, and dots from acronyms. See
3823
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html
3824
+ */
3055
3825
  Classic = "classic",
3056
- /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
3826
+ /**
3827
+ * Construct bigrams for frequently occurring terms while indexing. Single terms are still
3828
+ * indexed too, with bigrams overlaid. See
3829
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html
3830
+ */
3057
3831
  CommonGram = "common_grams",
3058
- /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
3832
+ /**
3833
+ * Generates n-grams of the given size(s) starting from the front or the back of an input token.
3834
+ * See
3835
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html
3836
+ */
3059
3837
  EdgeNGram = "edgeNGram_v2",
3060
- /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
3838
+ /**
3839
+ * Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See
3840
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html
3841
+ */
3061
3842
  Elision = "elision",
3062
- /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
3843
+ /**
3844
+ * Normalizes German characters according to the heuristics of the German2 snowball algorithm.
3845
+ * See
3846
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html
3847
+ */
3063
3848
  GermanNormalization = "german_normalization",
3064
- /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
3849
+ /**
3850
+ * Normalizes text in Hindi to remove some differences in spelling variations. See
3851
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html
3852
+ */
3065
3853
  HindiNormalization = "hindi_normalization",
3066
- /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
3854
+ /**
3855
+ * Normalizes the Unicode representation of text in Indian languages. See
3856
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html
3857
+ */
3067
3858
  IndicNormalization = "indic_normalization",
3068
- /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
3859
+ /**
3860
+ * Emits each incoming token twice, once as keyword and once as non-keyword. See
3861
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html
3862
+ */
3069
3863
  KeywordRepeat = "keyword_repeat",
3070
- /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
3864
+ /**
3865
+ * A high-performance kstem filter for English. See
3866
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html
3867
+ */
3071
3868
  KStem = "kstem",
3072
- /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
3869
+ /**
3870
+ * Removes words that are too long or too short. See
3871
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html
3872
+ */
3073
3873
  Length = "length",
3074
- /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
3874
+ /**
3875
+ * Limits the number of tokens while indexing. See
3876
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html
3877
+ */
3075
3878
  Limit = "limit",
3076
- /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
3879
+ /**
3880
+ * Normalizes token text to lower case. See
3881
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html
3882
+ */
3077
3883
  Lowercase = "lowercase",
3078
- /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
3884
+ /**
3885
+ * Generates n-grams of the given size(s). See
3886
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html
3887
+ */
3079
3888
  NGram = "nGram_v2",
3080
- /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
3889
+ /**
3890
+ * Applies normalization for Persian. See
3891
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html
3892
+ */
3081
3893
  PersianNormalization = "persian_normalization",
3082
- /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
3894
+ /**
3895
+ * Create tokens for phonetic matches. See
3896
+ * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html
3897
+ */
3083
3898
  Phonetic = "phonetic",
3084
- /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
3899
+ /**
3900
+ * Uses the Porter stemming algorithm to transform the token stream. See
3901
+ * http://tartarus.org/~martin/PorterStemmer
3902
+ */
3085
3903
  PorterStem = "porter_stem",
3086
- /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
3904
+ /**
3905
+ * Reverses the token string. See
3906
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html
3907
+ */
3087
3908
  Reverse = "reverse",
3088
- /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
3909
+ /**
3910
+ * Normalizes use of the interchangeable Scandinavian characters. See
3911
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html
3912
+ */
3089
3913
  ScandinavianNormalization = "scandinavian_normalization",
3090
- /** Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
3914
+ /**
3915
+ * Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use
3916
+ * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See
3917
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html
3918
+ */
3091
3919
  ScandinavianFoldingNormalization = "scandinavian_folding",
3092
- /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
3920
+ /**
3921
+ * Creates combinations of tokens as a single token. See
3922
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html
3923
+ */
3093
3924
  Shingle = "shingle",
3094
- /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
3925
+ /**
3926
+ * A filter that stems words using a Snowball-generated stemmer. See
3927
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html
3928
+ */
3095
3929
  Snowball = "snowball",
3096
- /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
3930
+ /**
3931
+ * Normalizes the Unicode representation of Sorani text. See
3932
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html
3933
+ */
3097
3934
  SoraniNormalization = "sorani_normalization",
3098
- /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
3935
+ /**
3936
+ * Language specific stemming filter. See
3937
+ * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters
3938
+ */
3099
3939
  Stemmer = "stemmer",
3100
- /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
3940
+ /**
3941
+ * Removes stop words from a token stream. See
3942
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html
3943
+ */
3101
3944
  Stopwords = "stopwords",
3102
- /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
3945
+ /**
3946
+ * Trims leading and trailing whitespace from tokens. See
3947
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html
3948
+ */
3103
3949
  Trim = "trim",
3104
- /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
3950
+ /**
3951
+ * Truncates the terms to a specific length. See
3952
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html
3953
+ */
3105
3954
  Truncate = "truncate",
3106
- /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
3955
+ /**
3956
+ * Filters out tokens with same text as the previous token. See
3957
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html
3958
+ */
3107
3959
  Unique = "unique",
3108
- /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
3960
+ /**
3961
+ * Normalizes token text to upper case. See
3962
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html
3963
+ */
3109
3964
  Uppercase = "uppercase",
3110
- /** Splits words into subwords and performs optional transformations on subword groups. */
3965
+ /**
3966
+ * Splits words into subwords and performs optional transformations on subword groups.
3967
+ */
3111
3968
  WordDelimiter = "word_delimiter"
3112
3969
  }
3113
3970
 
3114
- /** Known values of {@link LexicalTokenizerName} that the service accepts. */
3971
+ /**
3972
+ * Defines values for TokenizerName.
3973
+ * @readonly
3974
+ */
3115
3975
  export declare enum KnownTokenizerNames {
3116
- /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
3976
+ /**
3977
+ * Grammar-based tokenizer that is suitable for processing most European-language documents. See
3978
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html
3979
+ */
3117
3980
  Classic = "classic",
3118
- /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
3981
+ /**
3982
+ * Tokenizes the input from an edge into n-grams of the given size(s). See
3983
+ * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html
3984
+ */
3119
3985
  EdgeNGram = "edgeNGram",
3120
- /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
3986
+ /**
3987
+ * Emits the entire input as a single token. See
3988
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html
3989
+ */
3121
3990
  Keyword = "keyword_v2",
3122
- /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
3991
+ /**
3992
+ * Divides text at non-letters. See
3993
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html
3994
+ */
3123
3995
  Letter = "letter",
3124
- /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
3996
+ /**
3997
+ * Divides text at non-letters and converts them to lower case. See
3998
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html
3999
+ */
3125
4000
  Lowercase = "lowercase",
3126
- /** Divides text using language-specific rules. */
4001
+ /**
4002
+ * Divides text using language-specific rules.
4003
+ */
3127
4004
  MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
3128
- /** Divides text using language-specific rules and reduces words to their base forms. */
4005
+ /**
4006
+ * Divides text using language-specific rules and reduces words to their base forms.
4007
+ */
3129
4008
  MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
3130
- /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
4009
+ /**
4010
+ * Tokenizes the input into n-grams of the given size(s). See
4011
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html
4012
+ */
3131
4013
  NGram = "nGram",
3132
- /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
4014
+ /**
4015
+ * Tokenizer for path-like hierarchies. See
4016
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html
4017
+ */
3133
4018
  PathHierarchy = "path_hierarchy_v2",
3134
- /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
4019
+ /**
4020
+ * Tokenizer that uses regex pattern matching to construct distinct tokens. See
4021
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html
4022
+ */
3135
4023
  Pattern = "pattern",
3136
- /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
4024
+ /**
4025
+ * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop
4026
+ * filter. See
4027
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html
4028
+ */
3137
4029
  Standard = "standard_v2",
3138
- /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
4030
+ /**
4031
+ * Tokenizes urls and emails as one token. See
4032
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html
4033
+ */
3139
4034
  UaxUrlEmail = "uax_url_email",
3140
- /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
4035
+ /**
4036
+ * Divides text at whitespace. See
4037
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html
4038
+ */
3141
4039
  Whitespace = "whitespace"
3142
4040
  }
3143
4041
 
@@ -3160,7 +4058,11 @@ export declare enum KnownVectorQueryKind {
3160
4058
  /** Vector query where a raw vector value is provided. */
3161
4059
  Vector = "vector",
3162
4060
  /** Vector query where a text value that needs to be vectorized is provided. */
3163
- Text = "text"
4061
+ Text = "text",
4062
+ /** Vector query where a URL that represents an image value that needs to be vectorized is provided. */
4063
+ ImageUrl = "imageUrl",
4064
+ /** Vector query where a base64-encoded binary of an image that needs to be vectorized is provided. */
4065
+ ImageBinary = "imageBinary"
3164
4066
  }
3165
4067
 
3166
4068
  /** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */
@@ -3202,7 +4104,19 @@ export declare enum KnownVectorSearchVectorizerKind {
3202
4104
  /** Generate embeddings using an Azure OpenAI resource at query time. */
3203
4105
  AzureOpenAI = "azureOpenAI",
3204
4106
  /** Generate embeddings using a custom web endpoint at query time. */
3205
- CustomWebApi = "customWebApi"
4107
+ CustomWebApi = "customWebApi",
4108
+ /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */
4109
+ AIServicesVision = "aiServicesVision",
4110
+ /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog at query time. */
4111
+ AML = "aml"
4112
+ }
4113
+
4114
+ /** Known values of {@link VectorThresholdKind} that the service accepts. */
4115
+ export declare enum KnownVectorThresholdKind {
4116
+ /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
4117
+ VectorSimilarity = "vectorSimilarity",
4118
+ /** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
4119
+ SearchScore = "searchScore"
3206
4120
  }
3207
4121
 
3208
4122
  /** Known values of {@link VisualFeature} that the service accepts. */
@@ -3349,6 +4263,24 @@ export declare type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneS
3349
4263
  */
3350
4264
  export declare type LexicalAnalyzerName = string;
3351
4265
 
4266
+ /**
4267
+ * Contains the possible cases for LexicalNormalizer.
4268
+ */
4269
+ export declare type LexicalNormalizer = CustomNormalizer;
4270
+
4271
+ /**
4272
+ * Defines values for LexicalNormalizerName. \
4273
+ * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,
4274
+ * this enum contains the known values that the service supports.
4275
+ * ### Known values supported by the service
4276
+ * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \
4277
+ * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \
4278
+ * **lowercase**: Normalizes token text to lowercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
4279
+ * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \
4280
+ * **uppercase**: Normalizes token text to uppercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html
4281
+ */
4282
+ export declare type LexicalNormalizerName = string;
4283
+
3352
4284
  /**
3353
4285
  * Contains the possible cases for Tokenizer.
3354
4286
  */
@@ -3385,6 +4317,11 @@ export declare interface LimitTokenFilter extends BaseTokenFilter {
3385
4317
  consumeAllTokens?: boolean;
3386
4318
  }
3387
4319
 
4320
+ /**
4321
+ * Options for list aliases operation.
4322
+ */
4323
+ export declare type ListAliasesOptions = OperationOptions;
4324
+
3388
4325
  /**
3389
4326
  * Options for a list data sources operation.
3390
4327
  */
@@ -3534,6 +4471,12 @@ export declare type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catal
3534
4471
  */
3535
4472
  export declare type NarrowedModel<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends unknown ? true : false ? TModel : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? never : (<T>() => T extends TFields ? true : false) extends <T>() => T extends SelectFields<TModel> ? true : false ? TModel : SearchPick<TModel, TFields>;
3536
4473
 
4474
+ /** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
4475
+ export declare interface NativeBlobSoftDeleteDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy {
4476
+ /** Polymorphic discriminator, which specifies the different types this object can be */
4477
+ odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
4478
+ }
4479
+
3537
4480
  /**
3538
4481
  * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.
3539
4482
  */
@@ -3571,6 +4514,16 @@ export declare interface NGramTokenizer extends BaseLexicalTokenizer {
3571
4514
  tokenChars?: TokenCharacterKind[];
3572
4515
  }
3573
4516
 
4517
+ /**
4518
+ * Specifies the properties for connecting to an AML vectorizer with no authentication.
4519
+ */
4520
+ export declare interface NoAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
4521
+ /** Indicates how the service should attempt to identify itself to the AML instance */
4522
+ authKind: "none";
4523
+ /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
4524
+ scoringUri: string;
4525
+ }
4526
+
3574
4527
  /**
3575
4528
  * Defines values for OcrLineEnding. \
3576
4529
  * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,
@@ -3587,10 +4540,12 @@ export declare type OcrLineEnding = string;
3587
4540
  export declare interface OcrSkill extends BaseSearchIndexerSkill {
3588
4541
  /** Polymorphic discriminator, which specifies the different types this object can be */
3589
4542
  odatatype: "#Microsoft.Skills.Vision.OcrSkill";
3590
- /** A value indicating which language code to use. Default is en. */
4543
+ /** A value indicating which language code to use. Default is `en`. */
3591
4544
  defaultLanguageCode?: OcrSkillLanguage;
3592
4545
  /** A value indicating to turn orientation detection on or not. Default is false. */
3593
4546
  shouldDetectOrientation?: boolean;
4547
+ /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */
4548
+ lineEnding?: OcrLineEnding;
3594
4549
  }
3595
4550
 
3596
4551
  export declare type OcrSkillLanguage = `${KnownOcrSkillLanguage}`;
@@ -3822,16 +4777,190 @@ export declare interface QueryCaptionResult {
3822
4777
  readonly highlights?: string;
3823
4778
  }
3824
4779
 
4780
+ /**
4781
+ * Defines values for QueryDebugMode. \
4782
+ * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode,
4783
+ * this enum contains the known values that the service supports.
4784
+ * ### Known values supported by the service
4785
+ * **disabled**: No query debugging information will be returned. \
4786
+ * **semantic**: Allows the user to further explore their reranked results.
4787
+ */
4788
+ export declare type QueryDebugMode = string;
4789
+
4790
+ /**
4791
+ * Defines values for QueryLanguage. \
4792
+ * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage,
4793
+ * this enum contains the known values that the service supports.
4794
+ * ### Known values supported by the service
4795
+ * **none**: Query language not specified. \
4796
+ * **en-us**: Query language value for English (United States). \
4797
+ * **en-gb**: Query language value for English (Great Britain). \
4798
+ * **en-in**: Query language value for English (India). \
4799
+ * **en-ca**: Query language value for English (Canada). \
4800
+ * **en-au**: Query language value for English (Australia). \
4801
+ * **fr-fr**: Query language value for French (France). \
4802
+ * **fr-ca**: Query language value for French (Canada). \
4803
+ * **de-de**: Query language value for German (Germany). \
4804
+ * **es-es**: Query language value for Spanish (Spain). \
4805
+ * **es-mx**: Query language value for Spanish (Mexico). \
4806
+ * **zh-cn**: Query language value for Chinese (China). \
4807
+ * **zh-tw**: Query language value for Chinese (Taiwan). \
4808
+ * **pt-br**: Query language value for Portuguese (Brazil). \
4809
+ * **pt-pt**: Query language value for Portuguese (Portugal). \
4810
+ * **it-it**: Query language value for Italian (Italy). \
4811
+ * **ja-jp**: Query language value for Japanese (Japan). \
4812
+ * **ko-kr**: Query language value for Korean (Korea). \
4813
+ * **ru-ru**: Query language value for Russian (Russia). \
4814
+ * **cs-cz**: Query language value for Czech (Czech Republic). \
4815
+ * **nl-be**: Query language value for Dutch (Belgium). \
4816
+ * **nl-nl**: Query language value for Dutch (Netherlands). \
4817
+ * **hu-hu**: Query language value for Hungarian (Hungary). \
4818
+ * **pl-pl**: Query language value for Polish (Poland). \
4819
+ * **sv-se**: Query language value for Swedish (Sweden). \
4820
+ * **tr-tr**: Query language value for Turkish (Turkey). \
4821
+ * **hi-in**: Query language value for Hindi (India). \
4822
+ * **ar-sa**: Query language value for Arabic (Saudi Arabia). \
4823
+ * **ar-eg**: Query language value for Arabic (Egypt). \
4824
+ * **ar-ma**: Query language value for Arabic (Morocco). \
4825
+ * **ar-kw**: Query language value for Arabic (Kuwait). \
4826
+ * **ar-jo**: Query language value for Arabic (Jordan). \
4827
+ * **da-dk**: Query language value for Danish (Denmark). \
4828
+ * **no-no**: Query language value for Norwegian (Norway). \
4829
+ * **bg-bg**: Query language value for Bulgarian (Bulgaria). \
4830
+ * **hr-hr**: Query language value for Croatian (Croatia). \
4831
+ * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \
4832
+ * **ms-my**: Query language value for Malay (Malaysia). \
4833
+ * **ms-bn**: Query language value for Malay (Brunei Darussalam). \
4834
+ * **sl-sl**: Query language value for Slovenian (Slovenia). \
4835
+ * **ta-in**: Query language value for Tamil (India). \
4836
+ * **vi-vn**: Query language value for Vietnamese (Viet Nam). \
4837
+ * **el-gr**: Query language value for Greek (Greece). \
4838
+ * **ro-ro**: Query language value for Romanian (Romania). \
4839
+ * **is-is**: Query language value for Icelandic (Iceland). \
4840
+ * **id-id**: Query language value for Indonesian (Indonesia). \
4841
+ * **th-th**: Query language value for Thai (Thailand). \
4842
+ * **lt-lt**: Query language value for Lithuanian (Lithuania). \
4843
+ * **uk-ua**: Query language value for Ukrainian (Ukraine). \
4844
+ * **lv-lv**: Query language value for Latvian (Latvia). \
4845
+ * **et-ee**: Query language value for Estonian (Estonia). \
4846
+ * **ca-es**: Query language value for Catalan. \
4847
+ * **fi-fi**: Query language value for Finnish (Finland). \
4848
+ * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \
4849
+ * **sr-me**: Query language value for Serbian (Montenegro). \
4850
+ * **sr-rs**: Query language value for Serbian (Serbia). \
4851
+ * **sk-sk**: Query language value for Slovak (Slovakia). \
4852
+ * **nb-no**: Query language value for Norwegian (Norway). \
4853
+ * **hy-am**: Query language value for Armenian (Armenia). \
4854
+ * **bn-in**: Query language value for Bengali (India). \
4855
+ * **eu-es**: Query language value for Basque. \
4856
+ * **gl-es**: Query language value for Galician. \
4857
+ * **gu-in**: Query language value for Gujarati (India). \
4858
+ * **he-il**: Query language value for Hebrew (Israel). \
4859
+ * **ga-ie**: Query language value for Irish (Ireland). \
4860
+ * **kn-in**: Query language value for Kannada (India). \
4861
+ * **ml-in**: Query language value for Malayalam (India). \
4862
+ * **mr-in**: Query language value for Marathi (India). \
4863
+ * **fa-ae**: Query language value for Persian (U.A.E.). \
4864
+ * **pa-in**: Query language value for Punjabi (India). \
4865
+ * **te-in**: Query language value for Telugu (India). \
4866
+ * **ur-pk**: Query language value for Urdu (Pakistan).
4867
+ */
4868
+ export declare type QueryLanguage = string;
4869
+
4870
+ /** The raw concatenated strings that were sent to the semantic enrichment process. */
4871
+ export declare interface QueryResultDocumentRerankerInput {
4872
+ /**
4873
+ * The raw string for the title field that was used for semantic enrichment.
4874
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4875
+ */
4876
+ readonly title?: string;
4877
+ /**
4878
+ * The raw concatenated strings for the content fields that were used for semantic enrichment.
4879
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4880
+ */
4881
+ readonly content?: string;
4882
+ /**
4883
+ * The raw concatenated strings for the keyword fields that were used for semantic enrichment.
4884
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4885
+ */
4886
+ readonly keywords?: string;
4887
+ }
4888
+
4889
+ /** Description of fields that were sent to the semantic enrichment process, as well as how they were used */
4890
+ export declare interface QueryResultDocumentSemanticField {
4891
+ /**
4892
+ * The name of the field that was sent to the semantic enrichment process
4893
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4894
+ */
4895
+ readonly name?: string;
4896
+ /**
4897
+ * The way the field was used for the semantic enrichment process (fully used, partially used, or unused)
4898
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4899
+ */
4900
+ readonly state?: SemanticFieldState;
4901
+ }
4902
+
4903
+ /** The breakdown of subscores between the text and vector query components of the search query for this document. Each vector query is shown as a separate object in the same order they were received. */
4904
+ export declare interface QueryResultDocumentSubscores {
4905
+ /**
4906
+ * The BM25 or Classic score for the text portion of the query.
4907
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4908
+ */
4909
+ readonly text?: TextResult;
4910
+ /**
4911
+ * The vector similarity and @search.score values for each vector query.
4912
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4913
+ */
4914
+ readonly vectors?: {
4915
+ [propertyName: string]: SingleVectorFieldResult;
4916
+ }[];
4917
+ /**
4918
+ * The BM25 or Classic score for the text portion of the query.
4919
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4920
+ */
4921
+ readonly documentBoost?: number;
4922
+ }
4923
+
4924
+ /**
4925
+ * Defines values for QuerySpellerType. \
4926
+ * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType,
4927
+ * this enum contains the known values that the service supports.
4928
+ * ### Known values supported by the service
4929
+ * **none**: Speller not enabled. \
4930
+ * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
4931
+ */
4932
+ export declare type QuerySpellerType = string;
4933
+
3825
4934
  /** Defines values for QueryType. */
3826
4935
  export declare type QueryType = "simple" | "full" | "semantic";
3827
4936
 
3828
4937
  export declare type RegexFlags = `${KnownRegexFlags}`;
3829
4938
 
4939
+ /**
4940
+ * Options for reset docs operation.
4941
+ */
4942
+ export declare interface ResetDocumentsOptions extends OperationOptions {
4943
+ /** document keys to be reset */
4944
+ documentKeys?: string[];
4945
+ /** datasource document identifiers to be reset */
4946
+ datasourceDocumentIds?: string[];
4947
+ /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */
4948
+ overwrite?: boolean;
4949
+ }
4950
+
3830
4951
  /**
3831
4952
  * Options for reset indexer operation.
3832
4953
  */
3833
4954
  export declare type ResetIndexerOptions = OperationOptions;
3834
4955
 
4956
+ /**
4957
+ * Options for reset skills operation.
4958
+ */
4959
+ export declare interface ResetSkillsOptions extends OperationOptions {
4960
+ /** the names of skills to be reset. */
4961
+ skillNames?: string[];
4962
+ }
4963
+
3835
4964
  /** Represents a resource's usage and quota. */
3836
4965
  export declare interface ResourceCounter {
3837
4966
  /** The resource usage amount. */
@@ -3897,6 +5026,16 @@ export declare interface ScoringProfile {
3897
5026
  /** Defines values for ScoringStatistics. */
3898
5027
  export declare type ScoringStatistics = "local" | "global";
3899
5028
 
5029
+ /** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */
5030
+ export declare interface SearchAlias {
5031
+ /** The name of the alias. */
5032
+ name: string;
5033
+ /** The name of the index this alias maps to. Only one index name may be specified. */
5034
+ indexes: string[];
5035
+ /** The ETag of the alias. */
5036
+ etag?: string;
5037
+ }
5038
+
3900
5039
  /**
3901
5040
  * Class used to perform operations against a search index,
3902
5041
  * including querying documents in the index as well as
@@ -3925,6 +5064,10 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
3925
5064
  * A reference to the auto-generated SearchClient
3926
5065
  */
3927
5066
  private readonly client;
5067
+ /**
5068
+ * A reference to the internal HTTP pipeline for use with raw requests
5069
+ */
5070
+ readonly pipeline: Pipeline;
3928
5071
  /**
3929
5072
  * Creates an instance of SearchClient.
3930
5073
  *
@@ -4131,6 +5274,7 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
4131
5274
  private convertSelect;
4132
5275
  private convertVectorQueryFields;
4133
5276
  private convertSearchFields;
5277
+ private convertSemanticFields;
4134
5278
  private convertOrderBy;
4135
5279
  private convertQueryAnswers;
4136
5280
  private convertQueryCaptions;
@@ -4329,6 +5473,10 @@ export declare interface SearchIndex {
4329
5473
  * The character filters for the index.
4330
5474
  */
4331
5475
  charFilters?: CharFilter[];
5476
+ /**
5477
+ * The normalizers for the index.
5478
+ */
5479
+ normalizers?: LexicalNormalizer[];
4332
5480
  /**
4333
5481
  * A description of an encryption key that you create in Azure Key Vault. This key is used to
4334
5482
  * provide an additional level of encryption-at-rest for your data when you want full assurance
@@ -4360,6 +5508,11 @@ export declare interface SearchIndex {
4360
5508
  etag?: string;
4361
5509
  }
4362
5510
 
5511
+ /**
5512
+ * Search Alias object.
5513
+ */
5514
+ export declare type SearchIndexAlias = SearchAlias;
5515
+
4363
5516
  /**
4364
5517
  * Class to perform operations to manage
4365
5518
  * (create, update, list/delete)
@@ -4384,6 +5537,10 @@ export declare class SearchIndexClient {
4384
5537
  * A reference to the auto-generated SearchServiceClient
4385
5538
  */
4386
5539
  private readonly client;
5540
+ /**
5541
+ * A reference to the internal HTTP pipeline for use with raw requests
5542
+ */
5543
+ readonly pipeline: Pipeline;
4387
5544
  /**
4388
5545
  * Used to authenticate requests to the service.
4389
5546
  */
@@ -4416,6 +5573,13 @@ export declare class SearchIndexClient {
4416
5573
  * @param options - Options to the list index operation.
4417
5574
  */
4418
5575
  listIndexes(options?: ListIndexesOptions): IndexIterator;
5576
+ private listAliasesPage;
5577
+ private listAliasesAll;
5578
+ /**
5579
+ * Lists all aliases available for a search service.
5580
+ * @param options - The options parameters.
5581
+ */
5582
+ listAliases(options?: ListAliasesOptions): AliasIterator;
4419
5583
  private listIndexesNamesPage;
4420
5584
  private listIndexesNamesAll;
4421
5585
  /**
@@ -4481,6 +5645,31 @@ export declare class SearchIndexClient {
4481
5645
  * @param options - Additional optional arguments.
4482
5646
  */
4483
5647
  deleteSynonymMap(synonymMap: string | SynonymMap, options?: DeleteSynonymMapOptions): Promise<void>;
5648
+ /**
5649
+ * Creates a new search alias or updates an alias if it already exists.
5650
+ * @param alias - The definition of the alias to create or update.
5651
+ * @param options - The options parameters.
5652
+ */
5653
+ createOrUpdateAlias(alias: SearchIndexAlias, options?: CreateOrUpdateAliasOptions): Promise<SearchIndexAlias>;
5654
+ /**
5655
+ * Creates a new search alias.
5656
+ * @param alias - The definition of the alias to create.
5657
+ * @param options - The options parameters.
5658
+ */
5659
+ createAlias(alias: SearchIndexAlias, options?: CreateAliasOptions): Promise<SearchIndexAlias>;
5660
+ /**
5661
+ * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no
5662
+ * recovery option. The mapped index is untouched by this operation.
5663
+ * @param alias - Alias/Name name of the alias to delete.
5664
+ * @param options - The options parameters.
5665
+ */
5666
+ deleteAlias(alias: string | SearchIndexAlias, options?: DeleteAliasOptions): Promise<void>;
5667
+ /**
5668
+ * Retrieves an alias definition.
5669
+ * @param aliasName - The name of the alias to retrieve.
5670
+ * @param options - The options parameters.
5671
+ */
5672
+ getAlias(aliasName: string, options?: GetAliasOptions): Promise<SearchIndexAlias>;
4484
5673
  /**
4485
5674
  * Retrieves statistics about an index, such as the count of documents and the size
4486
5675
  * of index storage.
@@ -4594,6 +5783,29 @@ export declare interface SearchIndexer {
4594
5783
  * paid services created on or after January 1, 2019.
4595
5784
  */
4596
5785
  encryptionKey?: SearchResourceEncryptionKey;
5786
+ /**
5787
+ * Adds caching to an enrichment pipeline to allow for incremental modification steps without
5788
+ * having to rebuild the index every time.
5789
+ */
5790
+ cache?: SearchIndexerCache;
5791
+ }
5792
+
5793
+ export declare interface SearchIndexerCache {
5794
+ /**
5795
+ * The connection string to the storage account where the cache data will be persisted.
5796
+ */
5797
+ storageConnectionString?: string;
5798
+ /**
5799
+ * Specifies whether incremental reprocessing is enabled.
5800
+ */
5801
+ enableReprocessing?: boolean;
5802
+ /** The user-assigned managed identity used for connections to the enrichment cache. If the
5803
+ * connection string indicates an identity (ResourceId) and it's not specified, the
5804
+ * system-assigned managed identity is used. On updates to the indexer, if the identity is
5805
+ * unspecified, the value remains unchanged. If set to "none", the value of this property is
5806
+ * cleared.
5807
+ */
5808
+ identity?: SearchIndexerDataIdentity;
4597
5809
  }
4598
5810
 
4599
5811
  /**
@@ -4620,6 +5832,10 @@ export declare class SearchIndexerClient {
4620
5832
  * A reference to the auto-generated SearchServiceClient
4621
5833
  */
4622
5834
  private readonly client;
5835
+ /**
5836
+ * A reference to the internal HTTP pipeline for use with raw requests
5837
+ */
5838
+ readonly pipeline: Pipeline;
4623
5839
  /**
4624
5840
  * Creates an instance of SearchIndexerClient.
4625
5841
  *
@@ -4757,6 +5973,19 @@ export declare class SearchIndexerClient {
4757
5973
  * @param options - Additional optional arguments.
4758
5974
  */
4759
5975
  runIndexer(indexerName: string, options?: RunIndexerOptions): Promise<void>;
5976
+ /**
5977
+ * Resets specific documents in the datasource to be selectively re-ingested by the indexer.
5978
+ * @param indexerName - The name of the indexer to reset documents for.
5979
+ * @param options - Additional optional arguments.
5980
+ */
5981
+ resetDocuments(indexerName: string, options?: ResetDocumentsOptions): Promise<void>;
5982
+ /**
5983
+ * Reset an existing skillset in a search service.
5984
+ * @param skillsetName - The name of the skillset to reset.
5985
+ * @param skillNames - The names of skills to reset.
5986
+ * @param options - The options parameters.
5987
+ */
5988
+ resetSkills(skillsetName: string, options?: ResetSkillsOptions): Promise<void>;
4760
5989
  }
4761
5990
 
4762
5991
  /**
@@ -5023,7 +6252,7 @@ export declare interface SearchIndexerLimits {
5023
6252
  /**
5024
6253
  * Contains the possible cases for Skill.
5025
6254
  */
5026
- export declare type SearchIndexerSkill = AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill;
6255
+ export declare type SearchIndexerSkill = AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | VisionVectorizeSkill | WebApiSkill;
5027
6256
 
5028
6257
  /**
5029
6258
  * A list of skills.
@@ -5478,8 +6707,21 @@ export declare type SearchResult<TModel extends object, TFields extends SelectFi
5478
6707
  */
5479
6708
  readonly captions?: QueryCaptionResult[];
5480
6709
  document: NarrowedModel<TModel, TFields>;
6710
+ /**
6711
+ * Contains debugging information that can be used to further explore your search results.
6712
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6713
+ */
6714
+ readonly documentDebugInfo?: DocumentDebugInfo[];
5481
6715
  };
5482
6716
 
6717
+ /** The results of the vector query will filter based on the '\@search.score' value. Note this is the \@search.score returned as part of the search response. The threshold direction will be chosen for higher \@search.score. */
6718
+ export declare interface SearchScoreThreshold extends BaseVectorThreshold {
6719
+ /** Polymorphic discriminator, which specifies the different types this object can be */
6720
+ kind: "searchScore";
6721
+ /** The threshold will filter based on the '\@search.score' value. Note this is the \@search.score returned as part of the search response. The threshold direction will be chosen for higher \@search.score. */
6722
+ value: number;
6723
+ }
6724
+
5483
6725
  /**
5484
6726
  * Response from a get service statistics request. If successful, it includes service level
5485
6727
  * counters and limits.
@@ -5527,6 +6769,32 @@ export declare interface SemanticConfiguration {
5527
6769
  prioritizedFields: SemanticPrioritizedFields;
5528
6770
  }
5529
6771
 
6772
+ /**
6773
+ * Debug options for semantic search queries.
6774
+ */
6775
+ export declare interface SemanticDebugInfo {
6776
+ /**
6777
+ * The title field that was sent to the semantic enrichment process, as well as how it was used
6778
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6779
+ */
6780
+ readonly titleField?: QueryResultDocumentSemanticField;
6781
+ /**
6782
+ * The content fields that were sent to the semantic enrichment process, as well as how they were used
6783
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6784
+ */
6785
+ readonly contentFields?: QueryResultDocumentSemanticField[];
6786
+ /**
6787
+ * The keyword fields that were sent to the semantic enrichment process, as well as how they were used
6788
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6789
+ */
6790
+ readonly keywordFields?: QueryResultDocumentSemanticField[];
6791
+ /**
6792
+ * The raw concatenated strings that were sent to the semantic enrichment process.
6793
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6794
+ */
6795
+ readonly rerankerInput?: QueryResultDocumentRerankerInput;
6796
+ }
6797
+
5530
6798
  export declare type SemanticErrorMode = `${KnownSemanticErrorMode}`;
5531
6799
 
5532
6800
  export declare type SemanticErrorReason = `${KnownSemanticErrorReason}`;
@@ -5536,6 +6804,17 @@ export declare interface SemanticField {
5536
6804
  name: string;
5537
6805
  }
5538
6806
 
6807
+ /**
6808
+ * Defines values for SemanticFieldState. \
6809
+ * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState,
6810
+ * this enum contains the known values that the service supports.
6811
+ * ### Known values supported by the service
6812
+ * **used**: The field was fully used for semantic enrichment. \
6813
+ * **unused**: The field was not used for semantic enrichment. \
6814
+ * **partial**: The field was partially used for semantic enrichment.
6815
+ */
6816
+ export declare type SemanticFieldState = string;
6817
+
5539
6818
  /** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
5540
6819
  export declare interface SemanticPrioritizedFields {
5541
6820
  /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
@@ -5589,6 +6868,14 @@ export declare interface SemanticSearchOptions {
5589
6868
  * different queries between the base retrieval and ranking phase, and the L2 semantic phase.
5590
6869
  */
5591
6870
  semanticQuery?: string;
6871
+ /**
6872
+ * The list of field names used for semantic search.
6873
+ */
6874
+ semanticFields?: string[];
6875
+ /**
6876
+ * Enables a debugging tool that can be used to further explore your search results.
6877
+ */
6878
+ debugMode?: QueryDebugMode;
5592
6879
  }
5593
6880
 
5594
6881
  export declare type SemanticSearchResultsType = `${KnownSemanticSearchResultsType}`;
@@ -5621,6 +6908,8 @@ export declare interface SentimentSkillV3 extends BaseSearchIndexerSkill {
5621
6908
 
5622
6909
  /** Represents service-level resource counters and quotas. */
5623
6910
  export declare interface ServiceCounters {
6911
+ /** Total number of aliases. */
6912
+ aliasCounter: ResourceCounter;
5624
6913
  /** Total number of documents across all indexes in the service. */
5625
6914
  documentCounter: ResourceCounter;
5626
6915
  /** Total number of indexes. */
@@ -5723,9 +7012,10 @@ export declare interface SimpleField {
5723
7012
  * returned in a search result. You can disable this option if you don't plan to return the field
5724
7013
  * contents in a search response to save on storage overhead. This can only be set during index
5725
7014
  * creation and only for vector fields. This property cannot be changed for existing fields or set
5726
- * as false for new fields. If this property is set as false, the property 'hidden' must be set to
5727
- * 'true'. This property must be false or unset for key fields, for new fields, and for non-vector
5728
- * fields. Disabling this property will reduce index storage requirements.
7015
+ * as false for new fields. If this property is set to `false`, the property `hidden` must be set to
7016
+ * `true`. This property must be true or unset for key fields, for new fields, and for non-vector
7017
+ * fields, and it must be null for complex fields. Disabling this property will reduce index
7018
+ * storage requirements. The default is true for vector fields.
5729
7019
  */
5730
7020
  stored?: boolean;
5731
7021
  /**
@@ -5733,50 +7023,50 @@ export declare interface SimpleField {
5733
7023
  * analysis such as word-breaking during indexing. If you set a searchable field to a value like
5734
7024
  * "sunny day", internally it will be split into the individual tokens "sunny" and "day". This
5735
7025
  * enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String)
5736
- * are searchable by default. This property must be false for simple fields of other non-string
5737
- * data types. Note: searchable fields consume extra space
5738
- * in your index to accommodate additional tokenized versions of the field value for full-text
7026
+ * are searchable by default. This property must be false for simple
7027
+ * fields of other non-string data types.
7028
+ * Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text
5739
7029
  * searches. If you want to save space in your index and you don't need a field to be included in
5740
7030
  * searches, set searchable to false. Default is false.
5741
7031
  */
5742
7032
  searchable?: boolean;
5743
7033
  /**
5744
- * A value indicating whether to enable the field to be referenced in $filter queries. filterable
5745
- * differs from searchable in how strings are handled. Fields of type Edm.String or
5746
- * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for
5747
- * exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny'
5748
- * will find no matches, but $filter=f eq 'sunny day' will. Default is false.
7034
+ * A value indicating whether to enable the field to be referenced in $filter queries. `filterable`
7035
+ * differs from `searchable` in how strings are handled. Fields of type Edm.String or
7036
+ * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are
7037
+ * for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq
7038
+ * 'sunny' will find no matches, but $filter=f eq 'sunny day' will.
7039
+ * Default is false.
5749
7040
  */
5750
7041
  filterable?: boolean;
5751
7042
  /**
5752
7043
  * A value indicating whether to enable the field to be referenced in $orderby expressions. By
5753
- * default, the search engine sorts results by score, but in many experiences users will want to
5754
- * sort by fields in the documents. A simple field can be sortable only if it is single-valued (it
5755
- * has a single value in the scope of the parent document). Simple collection fields cannot be
5756
- * sortable, since they are multi-valued. Simple sub-fields of complex collections are also
7044
+ * default, the service sorts results by score, but in many experiences users will want
7045
+ * to sort by fields in the documents. A simple field can be sortable only if it is single-valued
7046
+ * (it has a single value in the scope of the parent document). Simple collection fields cannot
7047
+ * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also
5757
7048
  * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent
5758
7049
  * field, or an ancestor field, that's the complex collection. The default is false.
5759
- *
5760
7050
  */
5761
7051
  sortable?: boolean;
5762
7052
  /**
5763
7053
  * A value indicating whether to enable the field to be referenced in facet queries. Typically
5764
7054
  * used in a presentation of search results that includes hit count by category (for example,
5765
- * search for digital cameras and see hits by brand, by megapixels, by price, and so on). Fields
5766
- * of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is
5767
- * false.
7055
+ * search for digital cameras and see hits by brand, by megapixels, by price, and so on).
7056
+ * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable.
7057
+ * Default is false for all other simple fields.
5768
7058
  */
5769
7059
  facetable?: boolean;
5770
7060
  /**
5771
- * The name of the analyzer to use for the field. This option can be used only with searchable
5772
- * fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the
5773
- * analyzer is chosen, it cannot be changed for the field.
7061
+ * The name of the analyzer to use for the field. This option can be used only with
7062
+ * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.
7063
+ * Once the analyzer is chosen, it cannot be changed for the field.
5774
7064
  */
5775
7065
  analyzerName?: LexicalAnalyzerName;
5776
7066
  /**
5777
7067
  * The name of the analyzer used at search time for the field. This option can be used only with
5778
- * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set
5779
- * together with the `analyzerName` option. This property cannot be set to the name of a language
7068
+ * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set together
7069
+ * with the `analyzerName` option. This property cannot be set to the name of a language
5780
7070
  * analyzer; use the `analyzerName` property instead if you need a language analyzer. This
5781
7071
  * analyzer can be updated on an existing field.
5782
7072
  */
@@ -5784,18 +7074,22 @@ export declare interface SimpleField {
5784
7074
  /**
5785
7075
  * The name of the analyzer used at indexing time for the field. This option can be used only
5786
7076
  * with searchable fields. It must be set together with searchAnalyzer and it cannot be set
5787
- * together with the analyzer option. This property cannot be set to the name of a language
5788
- * analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer
5789
- * is chosen, it cannot be changed for the field.
7077
+ * together with the `analyzerName` option. Once the analyzer is chosen, it cannot be changed for the
7078
+ * field. KnownAnalyzerNames is an enum containing known values.
5790
7079
  */
5791
7080
  indexAnalyzerName?: LexicalAnalyzerName;
5792
7081
  /**
5793
7082
  * A list of the names of synonym maps to associate with this field. This option can be used only
5794
7083
  * with searchable fields. Currently only one synonym map per field is supported. Assigning a
5795
- * synonym map to a field ensures that query terms targeting that field are expanded at query-time
5796
- * using the rules in the synonym map. This attribute can be changed on existing fields.
7084
+ * synonym map to a field ensures that query terms targeting that field are expanded at
7085
+ * query-time using the rules in the synonym map. This attribute can be changed on existing
7086
+ * fields.
5797
7087
  */
5798
7088
  synonymMapNames?: string[];
7089
+ /**
7090
+ * The name of the normalizer used at indexing time for the field.
7091
+ */
7092
+ normalizerName?: LexicalNormalizerName;
5799
7093
  /**
5800
7094
  * The dimensionality of the vector field.
5801
7095
  */
@@ -5811,6 +7105,20 @@ export declare interface SimpleField {
5811
7105
  vectorEncodingFormat?: VectorEncodingFormat;
5812
7106
  }
5813
7107
 
7108
+ /** A single vector field result. Both @search.score and vector similarity values are returned. Vector similarity is related to @search.score by an equation. */
7109
+ export declare interface SingleVectorFieldResult {
7110
+ /**
7111
+ * The @search.score value that is calculated from the vector similarity score. This is the score that's visible in a pure single-field single-vector query.
7112
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7113
+ */
7114
+ readonly searchScore?: number;
7115
+ /**
7116
+ * The vector similarity score for this document. Note this is the canonical definition of similarity metric, not the 'distance' version. For example, cosine similarity instead of cosine distance.
7117
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7118
+ */
7119
+ readonly vectorSimilarity?: number;
7120
+ }
7121
+
5814
7122
  /** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */
5815
7123
  export declare interface SnowballTokenFilter extends BaseTokenFilter {
5816
7124
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -5832,20 +7140,60 @@ export declare interface SoftDeleteColumnDeletionDetectionPolicy extends BaseDat
5832
7140
  softDeleteMarkerValue?: string;
5833
7141
  }
5834
7142
 
7143
+ /**
7144
+ * Defines values for Speller. \
7145
+ * {@link KnownSpeller} can be used interchangeably with Speller,
7146
+ * this enum contains the known values that the service supports.
7147
+ * ### Known values supported by the service
7148
+ * **none**: Speller not enabled. \
7149
+ * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
7150
+ */
7151
+ export declare type Speller = string;
7152
+
5835
7153
  /** A skill to split a string into chunks of text. */
5836
7154
  export declare interface SplitSkill extends BaseSearchIndexerSkill {
5837
7155
  /** Polymorphic discriminator, which specifies the different types this object can be */
5838
7156
  odatatype: "#Microsoft.Skills.Text.SplitSkill";
5839
- /** A value indicating which language code to use. Default is en. */
7157
+ /** A value indicating which language code to use. Default is `en`. */
5840
7158
  defaultLanguageCode?: SplitSkillLanguage;
5841
7159
  /** A value indicating which split mode to perform. */
5842
7160
  textSplitMode?: TextSplitMode;
5843
7161
  /** The desired maximum page length. Default is 10000. */
5844
7162
  maxPageLength?: number;
7163
+ /** Only applicable when textSplitMode is set to 'pages'. If specified, n+1th chunk will start with this number of characters/tokens from the end of the nth chunk. */
7164
+ pageOverlapLength?: number;
7165
+ /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */
7166
+ maximumPagesToTake?: number;
7167
+ /** Only applies if textSplitMode is set to pages. There are two possible values. The choice of the values will decide the length (maximumPageLength and pageOverlapLength) measurement. The default is 'characters', which means the length will be measured by character. */
7168
+ unit?: SplitSkillUnit;
7169
+ /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */
7170
+ azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters;
5845
7171
  }
5846
7172
 
7173
+ /**
7174
+ * Defines values for SplitSkillEncoderModelName. \
7175
+ * {@link KnownSplitSkillEncoderModelName} can be used interchangeably with SplitSkillEncoderModelName,
7176
+ * this enum contains the known values that the service supports.
7177
+ * ### Known values supported by the service
7178
+ * **r50k_base**: Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. \
7179
+ * **p50k_base**: A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. \
7180
+ * **p50k_edit**: Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. \
7181
+ * **cl100k_base**: A base model with a 100,000 token vocabulary.
7182
+ */
7183
+ export declare type SplitSkillEncoderModelName = string;
7184
+
5847
7185
  export declare type SplitSkillLanguage = `${KnownSplitSkillLanguage}`;
5848
7186
 
7187
+ /**
7188
+ * Defines values for SplitSkillUnit. \
7189
+ * {@link KnownSplitSkillUnit} can be used interchangeably with SplitSkillUnit,
7190
+ * this enum contains the known values that the service supports.
7191
+ * ### Known values supported by the service
7192
+ * **characters**: The length will be measured by character. \
7193
+ * **azureOpenAITokens**: The length will be measured by an AzureOpenAI tokenizer from the tiktoken library.
7194
+ */
7195
+ export declare type SplitSkillUnit = string;
7196
+
5849
7197
  /** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
5850
7198
  export declare interface SqlIntegratedChangeTrackingPolicy extends BaseDataChangeDetectionPolicy {
5851
7199
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -6047,6 +7395,15 @@ export declare interface TagScoringParameters {
6047
7395
  tagsParameter: string;
6048
7396
  }
6049
7397
 
7398
+ /** The BM25 or Classic score for the text portion of the query. */
7399
+ export declare interface TextResult {
7400
+ /**
7401
+ * The BM25 or Classic score for the text portion of the query.
7402
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7403
+ */
7404
+ readonly searchScore?: number;
7405
+ }
7406
+
6050
7407
  export declare type TextSplitMode = `${KnownTextSplitMode}`;
6051
7408
 
6052
7409
  /** A skill to translate text from one language to another. */
@@ -6071,6 +7428,18 @@ export declare interface TextWeights {
6071
7428
  };
6072
7429
  }
6073
7430
 
7431
+ /**
7432
+ * Specifies the properties for connecting to an AML vectorizer with a managed identity.
7433
+ */
7434
+ export declare interface TokenAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
7435
+ /** Indicates how the service should attempt to identify itself to the AML instance */
7436
+ authKind: "token";
7437
+ /** The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/\{guid\}/resourceGroups/\{resource-group-name\}/Microsoft.MachineLearningServices/workspaces/\{workspace-name\}/services/\{service_name\}. */
7438
+ resourceId: string;
7439
+ /** The region the AML service is deployed in. */
7440
+ region?: string;
7441
+ }
7442
+
6074
7443
  /** Defines values for TokenCharacterKind. */
6075
7444
  export declare type TokenCharacterKind = "letter" | "digit" | "whitespace" | "punctuation" | "symbol";
6076
7445
 
@@ -6163,6 +7532,22 @@ export declare type VectorEncodingFormat = string;
6163
7532
 
6164
7533
  export declare type VectorFilterMode = `${KnownVectorFilterMode}`;
6165
7534
 
7535
+ /** The query parameters to use for vector search when a base 64 encoded binary of an image that needs to be vectorized is provided. */
7536
+ export declare interface VectorizableImageBinaryQuery<TModel extends object> extends BaseVectorQuery<TModel> {
7537
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7538
+ kind: "imageBinary";
7539
+ /** The base64 encoded binary of an image to be vectorized to perform a vector search query. */
7540
+ binaryImage: string;
7541
+ }
7542
+
7543
+ /** The query parameters to use for vector search when an url that represents an image value that needs to be vectorized is provided. */
7544
+ export declare interface VectorizableImageUrlQuery<TModel extends object> extends BaseVectorQuery<TModel> {
7545
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7546
+ kind: "imageUrl";
7547
+ /** The URL of an image to be vectorized to perform a vector search query. */
7548
+ url: string;
7549
+ }
7550
+
6166
7551
  /** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
6167
7552
  export declare interface VectorizableTextQuery<TModel extends object> extends BaseVectorQuery<TModel> {
6168
7553
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -6180,19 +7565,30 @@ export declare interface VectorizedQuery<TModel extends object> extends BaseVect
6180
7565
  }
6181
7566
 
6182
7567
  /** The query parameters for vector and hybrid search queries. */
6183
- export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel>;
7568
+ export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel> | VectorizableImageUrlQuery<TModel> | VectorizableImageBinaryQuery<TModel>;
6184
7569
 
6185
7570
  export declare type VectorQueryKind = `${KnownVectorQueryKind}`;
6186
7571
 
7572
+ export declare interface VectorsDebugInfo {
7573
+ /**
7574
+ * The breakdown of subscores of the document prior to the chosen result set fusion/combination method such as RRF.
7575
+ * NOTE: This property will not be serialized. It can only be populated by the server.
7576
+ */
7577
+ readonly subscores?: QueryResultDocumentSubscores;
7578
+ }
7579
+
6187
7580
  /** Contains configuration options related to vector search. */
6188
7581
  export declare interface VectorSearch {
6189
7582
  /** Defines combinations of configurations to use with vector search. */
6190
7583
  profiles?: VectorSearchProfile[];
6191
- /** Contains configuration options specific to the algorithm used during indexing or querying. */
7584
+ /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
6192
7585
  algorithms?: VectorSearchAlgorithmConfiguration[];
6193
7586
  /** Contains configuration options on how to vectorize text vector queries. */
6194
7587
  vectorizers?: VectorSearchVectorizer[];
6195
- /** Contains configuration options specific to the compression method used during indexing or querying. */
7588
+ /**
7589
+ * Contains configuration options specific to the compression method used during indexing or
7590
+ * querying.
7591
+ */
6196
7592
  compressions?: VectorSearchCompression[];
6197
7593
  }
6198
7594
 
@@ -6253,7 +7649,7 @@ export declare interface VectorSearchProfile {
6253
7649
  }
6254
7650
 
6255
7651
  /** Contains configuration options on how to vectorize text vector queries. */
6256
- export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVectorizer;
7652
+ export declare type VectorSearchVectorizer = AIServicesVisionVectorizer | AzureMachineLearningVectorizer | AzureOpenAIVectorizer | WebApiVectorizer;
6257
7653
 
6258
7654
  /**
6259
7655
  * Defines values for VectorSearchVectorizerKind. \
@@ -6261,10 +7657,31 @@ export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVecto
6261
7657
  * this enum contains the known values that the service supports.
6262
7658
  * ### Known values supported by the service
6263
7659
  * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \
6264
- * **customWebApi**: Generate embeddings using a custom web endpoint at query time.
7660
+ * **customWebApi**: Generate embeddings using a custom web endpoint at query time. \
7661
+ * **aiServicesVision**: Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. \
7662
+ * **aml**: Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog at query time.
6265
7663
  */
6266
7664
  export declare type VectorSearchVectorizerKind = string;
6267
7665
 
7666
+ /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
7667
+ export declare interface VectorSimilarityThreshold extends BaseVectorThreshold {
7668
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7669
+ kind: "vectorSimilarity";
7670
+ /** The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
7671
+ value: number;
7672
+ }
7673
+
7674
+ /** The threshold used for vector queries. */
7675
+ export declare type VectorThreshold = VectorSimilarityThreshold | SearchScoreThreshold;
7676
+
7677
+ /** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */
7678
+ export declare interface VisionVectorizeSkill extends BaseSearchIndexerSkill {
7679
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7680
+ odatatype: "#Microsoft.Skills.Vision.VectorizeSkill";
7681
+ /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */
7682
+ modelVersion?: string;
7683
+ }
7684
+
6268
7685
  export declare type VisualFeature = `${KnownVisualFeature}`;
6269
7686
 
6270
7687
  /** Specifies the properties for connecting to a user-defined vectorizer. */