@azure/search-documents 12.1.0-beta.2 → 12.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/dist/index.js +519 -2244
  2. package/dist/index.js.map +1 -1
  3. package/dist-esm/src/errorModels.js.map +1 -1
  4. package/dist-esm/src/generated/data/models/index.js +7 -209
  5. package/dist-esm/src/generated/data/models/index.js.map +1 -1
  6. package/dist-esm/src/generated/data/models/mappers.js +42 -315
  7. package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
  8. package/dist-esm/src/generated/data/models/parameters.js +0 -42
  9. package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
  10. package/dist-esm/src/generated/data/operations/documents.js +0 -4
  11. package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
  12. package/dist-esm/src/generated/data/searchClient.js +1 -1
  13. package/dist-esm/src/generated/data/searchClient.js.map +1 -1
  14. package/dist-esm/src/generated/service/models/index.js +100 -150
  15. package/dist-esm/src/generated/service/models/index.js.map +1 -1
  16. package/dist-esm/src/generated/service/models/mappers.js +129 -687
  17. package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
  18. package/dist-esm/src/generated/service/models/parameters.js +1 -51
  19. package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
  20. package/dist-esm/src/generated/service/operations/dataSources.js +1 -4
  21. package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
  22. package/dist-esm/src/generated/service/operations/index.js +0 -1
  23. package/dist-esm/src/generated/service/operations/index.js.map +1 -1
  24. package/dist-esm/src/generated/service/operations/indexers.js +1 -29
  25. package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
  26. package/dist-esm/src/generated/service/operations/skillsets.js +1 -30
  27. package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
  28. package/dist-esm/src/generated/service/operationsInterfaces/index.js +0 -1
  29. package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +1 -1
  30. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
  31. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  32. package/dist-esm/src/generated/service/searchServiceClient.js +2 -3
  33. package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
  34. package/dist-esm/src/index.js +3 -3
  35. package/dist-esm/src/index.js.map +1 -1
  36. package/dist-esm/src/indexModels.js.map +1 -1
  37. package/dist-esm/src/searchClient.js +3 -10
  38. package/dist-esm/src/searchClient.js.map +1 -1
  39. package/dist-esm/src/searchIndexClient.js +3 -152
  40. package/dist-esm/src/searchIndexClient.js.map +1 -1
  41. package/dist-esm/src/searchIndexerClient.js +0 -47
  42. package/dist-esm/src/searchIndexerClient.js.map +1 -1
  43. package/dist-esm/src/serviceModels.js.map +1 -1
  44. package/dist-esm/src/serviceUtils.js +39 -139
  45. package/dist-esm/src/serviceUtils.js.map +1 -1
  46. package/package.json +4 -3
  47. package/types/search-documents.d.ts +285 -1589
  48. package/dist-esm/src/generated/service/operations/aliases.js +0 -160
  49. package/dist-esm/src/generated/service/operations/aliases.js.map +0 -1
  50. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js +0 -9
  51. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +0 -1
  52. package/dist-esm/src/generatedStringLiteralUnions.js +0 -4
  53. package/dist-esm/src/generatedStringLiteralUnions.js.map +0 -1
@@ -5,51 +5,9 @@ import { ExtendedCommonClientOptions } from '@azure/core-http-compat';
  import { KeyCredential } from '@azure/core-auth';
  import { OperationOptions } from '@azure/core-client';
  import { PagedAsyncIterableIterator } from '@azure/core-paging';
- import { Pipeline } from '@azure/core-rest-pipeline';
  import { RestError } from '@azure/core-rest-pipeline';
  import { TokenCredential } from '@azure/core-auth';

- /** Specifies the AI Services Vision parameters for vectorizing a query image or text. */
- export declare interface AIServicesVisionParameters {
- /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */
- modelVersion?: string;
- /** The resource URI of the AI Services resource. */
- resourceUri: string;
- /** API key of the designated AI Services resource. */
- apiKey?: string;
- /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
- authIdentity?: SearchIndexerDataIdentity;
- }
-
- /** Specifies the AI Services Vision parameters for vectorizing a query image or text. */
- export declare interface AIServicesVisionVectorizer extends BaseVectorSearchVectorizer {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "aiServicesVision";
- /** Contains the parameters specific to AI Services Vision embedding vectorization. */
- aIServicesVisionParameters?: AIServicesVisionParameters;
- }
-
- /**
- * Defines values for AIStudioModelCatalogName. \
- * {@link KnownAIStudioModelCatalogName} can be used interchangeably with AIStudioModelCatalogName,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32** \
- * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336** \
- * **Facebook-DinoV2-Image-Embeddings-ViT-Base** \
- * **Facebook-DinoV2-Image-Embeddings-ViT-Giant** \
- * **Cohere-embed-v3-english** \
- * **Cohere-embed-v3-multilingual**
- */
- export declare type AIStudioModelCatalogName = string;
-
- /**
- * An iterator for listing the aliases that exist in the Search service. Will make requests
- * as needed during iteration. Use .byPage() to make one request to the server
- * per iteration.
- */
- export declare type AliasIterator = PagedAsyncIterableIterator<SearchIndexAlias, SearchIndexAlias[], {}>;
-
  /** Information about a token returned by an analyzer. */
  export declare interface AnalyzedTokenInfo {
  /**
@@ -96,11 +54,6 @@ export declare interface AnalyzeRequest {
  * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.
  */
  tokenizerName?: LexicalTokenizerName;
- /**
- * The name of the normalizer to use to normalize the given text. {@link KnownNormalizerNames} is
- * an enum containing built-in analyzer names.
- */
- normalizerName?: LexicalNormalizerName;
  /**
  * An optional list of token filters to use when breaking the given text. This parameter can only
  * be set when using the tokenizer parameter.
@@ -230,35 +183,6 @@ export declare interface AzureActiveDirectoryApplicationCredentials {

  export { AzureKeyCredential }

- /** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */
- export declare interface AzureMachineLearningSkill extends BaseSearchIndexerSkill {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- odatatype: "#Microsoft.Skills.Custom.AmlSkill";
- /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
- scoringUri?: string;
- /** (Required for key authentication) The key for the AML service. */
- authenticationKey?: string;
- /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */
- resourceId?: string;
- /** (Optional) When specified, indicates the timeout for the http client making the API call. */
- timeout?: string;
- /** (Optional for token authentication). The region the AML service is deployed in. */
- region?: string;
- /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */
- degreeOfParallelism?: number;
- }
-
- /** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog for generating the vector embedding of a query string. */
- export declare interface AzureMachineLearningVectorizer extends BaseVectorSearchVectorizer {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "aml";
- /** Specifies the properties of the AML vectorizer. */
- amlParameters?: AzureMachineLearningVectorizerParameters;
- }
-
- /** Specifies the properties for connecting to an AML vectorizer. */
- export declare type AzureMachineLearningVectorizerParameters = NoAuthAzureMachineLearningVectorizerParameters | KeyAuthAzureMachineLearningVectorizerParameters | TokenAuthAzureMachineLearningVectorizerParameters;
-
  /** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */
  export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill, AzureOpenAIParameters {
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -274,15 +198,14 @@ export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkil
  * ### Known values supported by the service
  * **text-embedding-ada-002** \
  * **text-embedding-3-large** \
- * **text-embedding-3-small** \
- * **experimental**
+ * **text-embedding-3-small**
  */
  export declare type AzureOpenAIModelName = string;

  /** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */
  export declare interface AzureOpenAIParameters {
  /** The resource uri for your Azure Open AI resource. */
- resourceUri?: string;
+ resourceUrl?: string;
  /** ID of your Azure Open AI model deployment on the designated resource. */
  deploymentId?: string;
  /** API key for the designated Azure Open AI resource. */
@@ -298,15 +221,7 @@ export declare interface AzureOpenAIVectorizer extends BaseVectorSearchVectorize
  /** Polymorphic discriminator, which specifies the different types this object can be */
  kind: "azureOpenAI";
  /** Contains the parameters specific to Azure Open AI embedding vectorization. */
- azureOpenAIParameters?: AzureOpenAIParameters;
- }
-
- /** Specifies the properties common between all AML vectorizer auth types. */
- export declare interface BaseAzureMachineLearningVectorizerParameters {
- /** When specified, indicates the timeout for the http client making the API call. */
- timeout?: string;
- /** The name of the embedding model from the Azure AI Studio Catalog that is deployed at the provided endpoint. */
- modelName?: AIStudioModelCatalogName;
+ parameters?: AzureOpenAIParameters;
  }

  /** Base type for character filters. */
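Note on the two hunks above: 12.1.0 renames the beta property resourceUri to resourceUrl on AzureOpenAIParameters and renames the vectorizer's azureOpenAIParameters bag to parameters. A minimal sketch of the GA shape, using only property names shown in this diff; the endpoint and deployment values below are placeholders, not taken from the package.

import type { AzureOpenAIParameters } from "@azure/search-documents";

// Placeholder values; only the property names come from the declarations above.
const embeddingParameters: AzureOpenAIParameters = {
  resourceUrl: "https://example.openai.azure.com", // was `resourceUri` in 12.1.0-beta.2
  deploymentId: "my-embedding-deployment",
};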
@@ -334,7 +249,7 @@ export declare interface BaseDataChangeDetectionPolicy {
  /** Base type for data deletion detection policies. */
  export declare interface BaseDataDeletionDetectionPolicy {
  /** Polymorphic discriminator, which specifies the different types this object can be */
- odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
+ odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
  }

  /** Base type for analyzers. */
@@ -345,14 +260,6 @@ export declare interface BaseLexicalAnalyzer {
  name: string;
  }

- /** Base type for normalizers. */
- export declare interface BaseLexicalNormalizer {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
- /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */
- name: string;
- }
-
  /** Base type for tokenizers. */
  export declare interface BaseLexicalTokenizer {
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -382,7 +289,7 @@ export declare interface BaseSearchIndexerDataIdentity {
  /** Base type for skills. */
  export declare interface BaseSearchIndexerSkill {
  /** Polymorphic discriminator, which specifies the different types this object can be */
- odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Custom.AmlSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" | "#Microsoft.Skills.Vision.VectorizeSkill";
+ odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
  /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */
  name?: string;
  /** The description of the skill which describes the inputs, outputs, and usage of the skill. */
@@ -467,14 +374,6 @@ export declare interface BaseSearchRequestOptions<TModel extends object, TFields
  * fielded search expression take precedence over any field names listed in this parameter.
  */
  searchFields?: SearchFieldArray<TModel>;
- /**
- * The language of the query.
- */
- queryLanguage?: QueryLanguage;
- /**
- * Improve search recall by spell-correcting individual search query terms.
- */
- speller?: Speller;
  /**
  * A value that specifies whether any or all of the search terms must be matched in order to
  * count the document as a match. Possible values include: 'any', 'all'
@@ -516,8 +415,6 @@ export declare interface BaseSearchRequestOptions<TModel extends object, TFields
  * Defines options for vector search queries
  */
  vectorSearchOptions?: VectorSearchOptions<TModel>;
- /** The query parameters to configure hybrid search behaviors. */
- hybridSearch?: HybridSearchOptions;
  }

  /** Base type for token filters. */
@@ -534,8 +431,6 @@ export declare interface BaseVectorQuery<TModel extends object> {
  * ### Known values supported by the service
  * **vector**: Vector query where a raw vector value is provided.
  * **text**: Vector query where a text value that needs to be vectorized is provided.
- * **imageUrl**: Vector query where an url that represents an image value that needs to be vectorized is provided.
- * **imageBinary**: Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided.
  */
  kind: VectorQueryKind;
  /** Number of nearest neighbors to return as top hits. */
@@ -557,8 +452,6 @@ export declare interface BaseVectorQuery<TModel extends object> {
  oversampling?: number;
  /** Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different vector queries and/or the results retrieved through the text query. The higher the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero. */
  weight?: number;
- /** The threshold used for vector queries. Note this can only be set if all 'fields' use the same similarity metric. */
- threshold?: VectorThreshold;
  }

  /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
@@ -570,11 +463,11 @@ export declare interface BaseVectorSearchAlgorithmConfiguration {
  }

  /** Contains configuration options specific to the compression method used during indexing or querying. */
- export declare interface BaseVectorSearchCompressionConfiguration {
+ export declare interface BaseVectorSearchCompression {
  /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "scalarQuantization";
+ kind: "scalarQuantization" | "binaryQuantization";
  /** The name to associate with this particular configuration. */
- name: string;
+ compressionName: string;
  /** If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */
  rerankWithOriginalVectors?: boolean;
  /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */
@@ -586,22 +479,22 @@ export declare interface BaseVectorSearchVectorizer {
  /** Polymorphic discriminator, which specifies the different types this object can be */
  kind: VectorSearchVectorizerKind;
  /** The name to associate with this particular vectorization method. */
- name: string;
+ vectorizerName: string;
  }

- /** The threshold used for vector queries. */
- export declare interface BaseVectorThreshold {
+ /** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */
+ export declare interface BinaryQuantizationCompression extends BaseVectorSearchCompression {
  /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "vectorSimilarity" | "searchScore";
+ kind: "binaryQuantization";
  }

- export declare type BlobIndexerDataToExtract = "storageMetadata" | "allMetadata" | "contentAndMetadata";
+ export declare type BlobIndexerDataToExtract = `${KnownBlobIndexerDataToExtract}`;

- export declare type BlobIndexerImageAction = "none" | "generateNormalizedImages" | "generateNormalizedImagePerPage";
+ export declare type BlobIndexerImageAction = `${KnownBlobIndexerImageAction}`;

- export declare type BlobIndexerParsingMode = "default" | "text" | "delimitedText" | "json" | "jsonArray" | "jsonLines";
+ export declare type BlobIndexerParsingMode = `${KnownBlobIndexerParsingMode}`;

- export declare type BlobIndexerPDFTextRotationAlgorithm = "none" | "detectAngles";
+ export declare type BlobIndexerPDFTextRotationAlgorithm = `${KnownBlobIndexerPDFTextRotationAlgorithm}`;

  /** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */
  export declare interface BM25Similarity extends Similarity {
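The hunk above also renames the base vectorizer's name property to vectorizerName and the compression configuration's name to compressionName, adds the binaryQuantization kind, and turns the Blob* option types into template-literal unions over their Known* enums. A small sketch against these declarations; all names and endpoint values below are placeholders, and "json" is assumed to remain a member of KnownBlobIndexerParsingMode, as it was in the removed literal union.

import type {
  AzureOpenAIVectorizer,
  BinaryQuantizationCompression,
  BlobIndexerParsingMode,
} from "@azure/search-documents";

// Vectorizer: `vectorizerName` replaces `name`, `parameters` replaces `azureOpenAIParameters`.
const vectorizer: AzureOpenAIVectorizer = {
  kind: "azureOpenAI",
  vectorizerName: "my-openai-vectorizer",
  parameters: {
    resourceUrl: "https://example.openai.azure.com",
    deploymentId: "my-embedding-deployment",
  },
};

// Compression: `compressionName` replaces `name`; `binaryQuantization` is a newly allowed kind.
const compression: BinaryQuantizationCompression = {
  kind: "binaryQuantization",
  compressionName: "my-binary-quantization",
  rerankWithOriginalVectors: true,
};

// Narrowed string types stay assignable from plain literals via the Known* template-literal unions.
const parsingMode: BlobIndexerParsingMode = "json";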
@@ -704,7 +597,7 @@ export declare interface ComplexField {
  /**
  * A list of sub-fields.
  */
- fields: SearchField[];
+ fields?: SearchField[];
  }

  /** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */
@@ -726,11 +619,6 @@ export declare interface CorsOptions {
  */
  export declare type CountDocumentsOptions = OperationOptions;

- /**
- * Options for create alias operation.
- */
- export declare type CreateAliasOptions = OperationOptions;
-
  /**
  * Options for create datasource operation.
  */
@@ -746,16 +634,6 @@ export declare type CreateIndexerOptions = OperationOptions;
  */
  export declare type CreateIndexOptions = OperationOptions;

- /**
- * Options for create or update alias operation.
- */
- export declare interface CreateOrUpdateAliasOptions extends OperationOptions {
- /**
- * If set to true, Resource will be deleted only if the etag matches.
- */
- onlyIfUnchanged?: boolean;
- }
-
  /**
  * Options for create/update datasource operation.
  */
@@ -764,10 +642,6 @@ export declare interface CreateorUpdateDataSourceConnectionOptions extends Opera
  * If set to true, Resource will be updated only if the etag matches.
  */
  onlyIfUnchanged?: boolean;
- /**
- * Ignores cache reset requirements.
- */
- skipIndexerResetRequirementForCache?: boolean;
  }

  /**
@@ -778,10 +652,6 @@ export declare interface CreateorUpdateIndexerOptions extends OperationOptions {
  * If set to true, Resource will be updated only if the etag matches.
  */
  onlyIfUnchanged?: boolean;
- /** Ignores cache reset requirements. */
- skipIndexerResetRequirementForCache?: boolean;
- /** Disables cache reprocessing change detection. */
- disableCacheReprocessingChangeDetection?: boolean;
  }

  /**
@@ -809,14 +679,6 @@ export declare interface CreateOrUpdateSkillsetOptions extends OperationOptions
  * If set to true, Resource will be updated only if the etag matches.
  */
  onlyIfUnchanged?: boolean;
- /**
- * Ignores cache reset requirements.
- */
- skipIndexerResetRequirementForCache?: boolean;
- /**
- * Disables cache reprocessing change detection.
- */
- disableCacheReprocessingChangeDetection?: boolean;
  }

  /**
@@ -941,41 +803,7 @@ export declare interface CustomEntityLookupSkill extends BaseSearchIndexerSkill
  globalDefaultFuzzyEditDistance?: number;
  }

- export declare type CustomEntityLookupSkillLanguage = "da" | "de" | "en" | "es" | "fi" | "fr" | "it" | "ko" | "pt";
-
- /** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */
- export declare interface CustomNormalizer extends BaseLexicalNormalizer {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
- /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
- tokenFilters?: TokenFilterName[];
- /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
- charFilters?: CharFilterName[];
- }
-
- /** Contains the parameters specific to generating vector embeddings via a custom endpoint. */
- export declare interface CustomVectorizer extends BaseVectorSearchVectorizer {
- /** Polymorphic discriminator, which specifies the different types this object can be */
- kind: "customWebApi";
- /** Contains the parameters specific to generating vector embeddings via a custom endpoint. */
- customVectorizerParameters?: CustomVectorizerParameters;
- }
-
- /** Contains the parameters specific to generating vector embeddings via a custom endpoint. */
- export declare interface CustomVectorizerParameters {
- /** The uri for the Web API. */
- uri?: string;
- /** The headers required to make the http request. */
- httpHeaders?: Record<string, string>;
- /** The method for the http request. */
- httpMethod?: string;
- /** The desired timeout for the request. Default is 30 seconds. */
- timeout?: string;
- /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
- authResourceId?: string;
- /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
- authIdentity?: SearchIndexerDataIdentity;
- }
+ export declare type CustomEntityLookupSkillLanguage = `${KnownCustomEntityLookupSkillLanguage}`;

  /**
  * Contains the possible cases for DataChangeDetectionPolicy.
@@ -985,7 +813,7 @@ export declare type DataChangeDetectionPolicy = HighWaterMarkChangeDetectionPoli
  /**
  * Contains the possible cases for DataDeletionDetectionPolicy.
  */
- export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy | NativeBlobSoftDeleteDeletionDetectionPolicy;
+ export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;

  /**
  * Default Batch Size
@@ -1008,16 +836,6 @@ export declare interface DefaultCognitiveServicesAccount extends BaseCognitiveSe
  odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
  }

- /**
- * Options for delete alias operation.
- */
- export declare interface DeleteAliasOptions extends OperationOptions {
- /**
- * If set to true, Resource will be deleted only if the etag matches.
- */
- onlyIfUnchanged?: boolean;
- }
-
  /**
  * Options for delete datasource operation.
  */
@@ -1105,15 +923,6 @@ export declare interface DistanceScoringParameters {
  boostingDistance: number;
  }

- /** Contains debugging information that can be used to further explore your search results. */
- export declare interface DocumentDebugInfo {
- /**
- * Contains debugging information specific to semantic search queries.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly semantic?: SemanticDebugInfo;
- }
-
  /** A skill that extracts content from a file within the enrichment pipeline. */
  export declare interface DocumentExtractionSkill extends BaseSearchIndexerSkill {
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1182,7 +991,7 @@ export declare interface ElisionTokenFilter extends BaseTokenFilter {
  articles?: string[];
  }

- export declare type EntityCategory = "location" | "organization" | "person" | "quantity" | "datetime" | "url" | "email";
+ export declare type EntityCategory = `${KnownEntityCategory}`;

  /** Using the Text Analytics API, extracts linked entities from text. */
  export declare interface EntityLinkingSkill extends BaseSearchIndexerSkill {
@@ -1214,7 +1023,7 @@ export declare interface EntityRecognitionSkill extends BaseSearchIndexerSkill {
  minimumPrecision?: number;
  }

- export declare type EntityRecognitionSkillLanguage = "ar" | "cs" | "zh-Hans" | "zh-Hant" | "da" | "nl" | "en" | "fi" | "fr" | "de" | "el" | "hu" | "it" | "ja" | "ko" | "no" | "pl" | "pt-PT" | "pt-BR" | "ru" | "es" | "sv" | "tr";
+ export declare type EntityRecognitionSkillLanguage = `${KnownEntityRecognitionSkillLanguage}`;

  /** Using the Text Analytics API, extracts entities of different types from text. */
  export declare interface EntityRecognitionSkillV3 extends BaseSearchIndexerSkill {
@@ -1344,11 +1153,6 @@ export declare class GeographyPoint {
  toJSON(): Record<string, unknown>;
  }

- /**
- * Options for get alias operation.
- */
- export declare type GetAliasOptions = OperationOptions;
-
  /**
  * Options for get datasource operation.
  */
@@ -1453,24 +1257,6 @@ export declare interface HnswParameters {
  metric?: VectorSearchAlgorithmMetric;
  }

- /**
- * Defines values for HybridCountAndFacetMode. \
- * {@link KnownHybridCountAndFacetMode} can be used interchangeably with HybridCountAndFacetMode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **countRetrievableResults**: Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. \
- * **countAllResults**: Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window.
- */
- export declare type HybridCountAndFacetMode = string;
-
- /** TThe query parameters to configure hybrid search behaviors. */
- export declare interface HybridSearchOptions {
- /** Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of results. Choosing a larger maxTextRecallSize value will allow retrieving and paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000. */
- maxTextRecallSize?: number;
- /** Determines whether the count and facets should includes all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. */
- countAndFacetMode?: HybridCountAndFacetMode;
- }
-
  /** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
  export declare interface ImageAnalysisSkill extends BaseSearchIndexerSkill {
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1495,9 +1281,9 @@ export declare interface ImageAnalysisSkill extends BaseSearchIndexerSkill {
  details?: ImageDetail[];
  }

- export declare type ImageAnalysisSkillLanguage = "ar" | "az" | "bg" | "bs" | "ca" | "cs" | "cy" | "da" | "de" | "el" | "en" | "es" | "et" | "eu" | "fi" | "fr" | "ga" | "gl" | "he" | "hi" | "hr" | "hu" | "id" | "it" | "ja" | "kk" | "ko" | "lt" | "lv" | "mk" | "ms" | "nb" | "nl" | "pl" | "prs" | "pt-BR" | "pt" | "pt-PT" | "ro" | "ru" | "sk" | "sl" | "sr-Cyrl" | "sr-Latn" | "sv" | "th" | "tr" | "uk" | "vi" | "zh" | "zh-Hans" | "zh-Hant";
+ export declare type ImageAnalysisSkillLanguage = `${KnownImageAnalysisSkillLanguage}`;

- export declare type ImageDetail = "celebrities" | "landmarks";
+ export declare type ImageDetail = `${KnownImageDetail}`;

  /** Defines values for IndexActionType. */
  export declare type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete";
@@ -1587,7 +1373,7 @@ export declare interface IndexDocumentsResult {
  readonly results: IndexingResult[];
  }

- export declare type IndexerExecutionEnvironment = "standard" | "private";
+ export declare type IndexerExecutionEnvironment = `${KnownIndexerExecutionEnvironment}`;

  /** Represents the result of an individual indexer execution. */
  export declare interface IndexerExecutionResult {
@@ -1596,16 +1382,6 @@ export declare interface IndexerExecutionResult {
  * NOTE: This property will not be serialized. It can only be populated by the server.
  */
  readonly status: IndexerExecutionStatus;
- /**
- * The outcome of this indexer execution.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly statusDetail?: IndexerExecutionStatusDetail;
- /**
- * All of the state that defines and dictates the indexer's current execution.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly currentState?: IndexerState;
  /**
  * The error message indicating the top-level error, if any.
  * NOTE: This property will not be serialized. It can only be populated by the server.
@@ -1656,67 +1432,9 @@ export declare interface IndexerExecutionResult {
  /** Defines values for IndexerExecutionStatus. */
  export declare type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset";

- /**
- * Defines values for IndexerExecutionStatusDetail. \
- * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs.
- */
- export declare type IndexerExecutionStatusDetail = string;
-
- /** Represents all of the state that defines and dictates the indexer's current execution. */
- export declare interface IndexerState {
- /**
- * The mode the indexer is running in.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly mode?: IndexingMode;
- /**
- * Change tracking state used when indexing starts on all documents in the datasource.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly allDocumentsInitialChangeTrackingState?: string;
- /**
- * Change tracking state value when indexing finishes on all documents in the datasource.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly allDocumentsFinalChangeTrackingState?: string;
- /**
- * Change tracking state used when indexing starts on select, reset documents in the datasource.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly resetDocumentsInitialChangeTrackingState?: string;
- /**
- * Change tracking state value when indexing finishes on select, reset documents in the datasource.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly resetDocumentsFinalChangeTrackingState?: string;
- /**
- * The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly resetDocumentKeys?: string[];
- /**
- * The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids.
- * NOTE: This property will not be serialized. It can only be populated by the server.
- */
- readonly resetDatasourceDocumentIds?: string[];
- }
-
  /** Defines values for IndexerStatus. */
  export declare type IndexerStatus = "unknown" | "error" | "running";

- /**
- * Defines values for IndexingMode. \
- * {@link KnownIndexingMode} can be used interchangeably with IndexingMode,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \
- * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status.
- */
- export declare type IndexingMode = string;
-
  /** Represents parameters for indexer execution. */
  export declare interface IndexingParameters {
  /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */
@@ -1845,18 +1563,6 @@ export declare interface KeepTokenFilter extends BaseTokenFilter {
  lowerCaseKeepWords?: boolean;
  }

- /**
- * Specifies the properties for connecting to an AML vectorizer with an authentication key.
- */
- export declare interface KeyAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
- /** Indicates how the service should attempt to identify itself to the AML instance */
- authKind: "key";
- /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
- scoringUri: string;
- /** The key for the AML service. */
- authenticationKey: string;
- }
-
  /** A skill that uses text analytics for key phrase extraction. */
  export declare interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill {
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -1869,7 +1575,7 @@ export declare interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill
  modelVersion?: string;
  }

- export declare type KeyPhraseExtractionSkillLanguage = "da" | "nl" | "en" | "fi" | "fr" | "de" | "it" | "ja" | "ko" | "no" | "pl" | "pt-PT" | "pt-BR" | "ru" | "es" | "sv";
+ export declare type KeyPhraseExtractionSkillLanguage = `${KnownKeyPhraseExtractionSkillLanguage}`;

  /** Marks terms as keywords. This token filter is implemented using Apache Lucene. */
  export declare interface KeywordMarkerTokenFilter extends BaseTokenFilter {
@@ -1902,22 +1608,6 @@ export declare interface KeywordTokenizer {
  maxTokenLength?: number;
  }

- /** Known values of {@link AIStudioModelCatalogName} that the service accepts. */
- export declare enum KnownAIStudioModelCatalogName {
- /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */
- OpenAIClipImageTextEmbeddingsVitBasePatch32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32",
- /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */
- OpenAIClipImageTextEmbeddingsViTLargePatch14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336",
- /** FacebookDinoV2ImageEmbeddingsViTBase */
- FacebookDinoV2ImageEmbeddingsViTBase = "Facebook-DinoV2-Image-Embeddings-ViT-Base",
- /** FacebookDinoV2ImageEmbeddingsViTGiant */
- FacebookDinoV2ImageEmbeddingsViTGiant = "Facebook-DinoV2-Image-Embeddings-ViT-Giant",
- /** CohereEmbedV3English */
- CohereEmbedV3English = "Cohere-embed-v3-english",
- /** CohereEmbedV3Multilingual */
- CohereEmbedV3Multilingual = "Cohere-embed-v3-multilingual"
- }
-
  /**
  * Defines values for AnalyzerName.
  * See https://docs.microsoft.com/rest/api/searchservice/Language-support
@@ -2304,9 +1994,7 @@ export declare enum KnownAzureOpenAIModelName {
  /** TextEmbedding3Large */
  TextEmbedding3Large = "text-embedding-3-large",
  /** TextEmbedding3Small */
- TextEmbedding3Small = "text-embedding-3-small",
- /** Experimental */
- Experimental = "experimental"
+ TextEmbedding3Small = "text-embedding-3-small"
  }

  /** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */
@@ -2354,20 +2042,8 @@ export declare enum KnownBlobIndexerPDFTextRotationAlgorithm {
  }

  /** Known values of {@link CharFilterName} that the service accepts. */
- export declare enum KnownCharFilterName {
- /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
- HtmlStrip = "html_strip"
- }
-
- /**
- * Defines values for CharFilterName.
- * @readonly
- */
  export declare enum KnownCharFilterNames {
- /**
- * A character filter that attempts to strip out HTML constructs. See
- * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html
- */
+ /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
  HtmlStrip = "html_strip"
  }

@@ -2461,14 +2137,6 @@ export declare enum KnownEntityRecognitionSkillLanguage {
  Tr = "tr"
  }

- /** Known values of {@link HybridCountAndFacetMode} that the service accepts. */
- export declare enum KnownHybridCountAndFacetMode {
- /** Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'. */
- CountRetrievableResults = "countRetrievableResults",
- /** Include all documents that were matched by the search query when computing 'count' and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window. */
- CountAllResults = "countAllResults"
- }
-
  /** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. */
  export declare enum KnownImageAnalysisSkillLanguage {
  /** Arabic */
@@ -2593,20 +2261,6 @@ export declare enum KnownIndexerExecutionEnvironment {
  Private = "private"
  }

- /** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
- export declare enum KnownIndexerExecutionStatusDetail {
- /** Indicates that the reset that occurred was for a call to ResetDocs. */
- ResetDocs = "resetDocs"
- }
-
- /** Known values of {@link IndexingMode} that the service accepts. */
- export declare enum KnownIndexingMode {
- /** The indexer is indexing all documents in the datasource. */
- IndexingAllDocs = "indexingAllDocs",
- /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
- IndexingResetDocs = "indexingResetDocs"
- }
-
  /** Known values of {@link IndexProjectionMode} that the service accepts. */
  export declare enum KnownIndexProjectionMode {
  /** The source document will be skipped from writing into the indexer's target index. */
@@ -2651,244 +2305,8 @@ export declare enum KnownKeyPhraseExtractionSkillLanguage {
  Sv = "sv"
  }

- /** Known values of {@link LexicalAnalyzerName} that the service accepts. */
- export declare enum KnownLexicalAnalyzerName {
- /** Microsoft analyzer for Arabic. */
- ArMicrosoft = "ar.microsoft",
- /** Lucene analyzer for Arabic. */
- ArLucene = "ar.lucene",
- /** Lucene analyzer for Armenian. */
- HyLucene = "hy.lucene",
- /** Microsoft analyzer for Bangla. */
- BnMicrosoft = "bn.microsoft",
- /** Lucene analyzer for Basque. */
- EuLucene = "eu.lucene",
- /** Microsoft analyzer for Bulgarian. */
- BgMicrosoft = "bg.microsoft",
- /** Lucene analyzer for Bulgarian. */
- BgLucene = "bg.lucene",
- /** Microsoft analyzer for Catalan. */
- CaMicrosoft = "ca.microsoft",
- /** Lucene analyzer for Catalan. */
- CaLucene = "ca.lucene",
- /** Microsoft analyzer for Chinese (Simplified). */
- ZhHansMicrosoft = "zh-Hans.microsoft",
- /** Lucene analyzer for Chinese (Simplified). */
- ZhHansLucene = "zh-Hans.lucene",
- /** Microsoft analyzer for Chinese (Traditional). */
- ZhHantMicrosoft = "zh-Hant.microsoft",
- /** Lucene analyzer for Chinese (Traditional). */
- ZhHantLucene = "zh-Hant.lucene",
- /** Microsoft analyzer for Croatian. */
- HrMicrosoft = "hr.microsoft",
- /** Microsoft analyzer for Czech. */
- CsMicrosoft = "cs.microsoft",
- /** Lucene analyzer for Czech. */
- CsLucene = "cs.lucene",
- /** Microsoft analyzer for Danish. */
- DaMicrosoft = "da.microsoft",
- /** Lucene analyzer for Danish. */
- DaLucene = "da.lucene",
- /** Microsoft analyzer for Dutch. */
- NlMicrosoft = "nl.microsoft",
- /** Lucene analyzer for Dutch. */
- NlLucene = "nl.lucene",
- /** Microsoft analyzer for English. */
- EnMicrosoft = "en.microsoft",
- /** Lucene analyzer for English. */
- EnLucene = "en.lucene",
- /** Microsoft analyzer for Estonian. */
- EtMicrosoft = "et.microsoft",
- /** Microsoft analyzer for Finnish. */
- FiMicrosoft = "fi.microsoft",
- /** Lucene analyzer for Finnish. */
- FiLucene = "fi.lucene",
- /** Microsoft analyzer for French. */
- FrMicrosoft = "fr.microsoft",
- /** Lucene analyzer for French. */
- FrLucene = "fr.lucene",
- /** Lucene analyzer for Galician. */
- GlLucene = "gl.lucene",
- /** Microsoft analyzer for German. */
- DeMicrosoft = "de.microsoft",
- /** Lucene analyzer for German. */
- DeLucene = "de.lucene",
- /** Microsoft analyzer for Greek. */
- ElMicrosoft = "el.microsoft",
- /** Lucene analyzer for Greek. */
- ElLucene = "el.lucene",
- /** Microsoft analyzer for Gujarati. */
- GuMicrosoft = "gu.microsoft",
- /** Microsoft analyzer for Hebrew. */
- HeMicrosoft = "he.microsoft",
- /** Microsoft analyzer for Hindi. */
- HiMicrosoft = "hi.microsoft",
- /** Lucene analyzer for Hindi. */
- HiLucene = "hi.lucene",
- /** Microsoft analyzer for Hungarian. */
- HuMicrosoft = "hu.microsoft",
- /** Lucene analyzer for Hungarian. */
- HuLucene = "hu.lucene",
- /** Microsoft analyzer for Icelandic. */
- IsMicrosoft = "is.microsoft",
- /** Microsoft analyzer for Indonesian (Bahasa). */
- IdMicrosoft = "id.microsoft",
- /** Lucene analyzer for Indonesian. */
- IdLucene = "id.lucene",
- /** Lucene analyzer for Irish. */
- GaLucene = "ga.lucene",
- /** Microsoft analyzer for Italian. */
- ItMicrosoft = "it.microsoft",
- /** Lucene analyzer for Italian. */
- ItLucene = "it.lucene",
- /** Microsoft analyzer for Japanese. */
- JaMicrosoft = "ja.microsoft",
- /** Lucene analyzer for Japanese. */
- JaLucene = "ja.lucene",
- /** Microsoft analyzer for Kannada. */
- KnMicrosoft = "kn.microsoft",
- /** Microsoft analyzer for Korean. */
- KoMicrosoft = "ko.microsoft",
- /** Lucene analyzer for Korean. */
- KoLucene = "ko.lucene",
- /** Microsoft analyzer for Latvian. */
- LvMicrosoft = "lv.microsoft",
- /** Lucene analyzer for Latvian. */
- LvLucene = "lv.lucene",
- /** Microsoft analyzer for Lithuanian. */
- LtMicrosoft = "lt.microsoft",
- /** Microsoft analyzer for Malayalam. */
- MlMicrosoft = "ml.microsoft",
- /** Microsoft analyzer for Malay (Latin). */
- MsMicrosoft = "ms.microsoft",
- /** Microsoft analyzer for Marathi. */
- MrMicrosoft = "mr.microsoft",
- /** Microsoft analyzer for Norwegian (Bokmål). */
- NbMicrosoft = "nb.microsoft",
- /** Lucene analyzer for Norwegian. */
- NoLucene = "no.lucene",
- /** Lucene analyzer for Persian. */
- FaLucene = "fa.lucene",
- /** Microsoft analyzer for Polish. */
- PlMicrosoft = "pl.microsoft",
- /** Lucene analyzer for Polish. */
- PlLucene = "pl.lucene",
- /** Microsoft analyzer for Portuguese (Brazil). */
- PtBrMicrosoft = "pt-BR.microsoft",
- /** Lucene analyzer for Portuguese (Brazil). */
- PtBrLucene = "pt-BR.lucene",
- /** Microsoft analyzer for Portuguese (Portugal). */
- PtPtMicrosoft = "pt-PT.microsoft",
- /** Lucene analyzer for Portuguese (Portugal). */
- PtPtLucene = "pt-PT.lucene",
- /** Microsoft analyzer for Punjabi. */
- PaMicrosoft = "pa.microsoft",
- /** Microsoft analyzer for Romanian. */
- RoMicrosoft = "ro.microsoft",
- /** Lucene analyzer for Romanian. */
- RoLucene = "ro.lucene",
- /** Microsoft analyzer for Russian. */
- RuMicrosoft = "ru.microsoft",
- /** Lucene analyzer for Russian. */
- RuLucene = "ru.lucene",
- /** Microsoft analyzer for Serbian (Cyrillic). */
- SrCyrillicMicrosoft = "sr-cyrillic.microsoft",
- /** Microsoft analyzer for Serbian (Latin). */
- SrLatinMicrosoft = "sr-latin.microsoft",
- /** Microsoft analyzer for Slovak. */
- SkMicrosoft = "sk.microsoft",
- /** Microsoft analyzer for Slovenian. */
- SlMicrosoft = "sl.microsoft",
- /** Microsoft analyzer for Spanish. */
- EsMicrosoft = "es.microsoft",
- /** Lucene analyzer for Spanish. */
- EsLucene = "es.lucene",
- /** Microsoft analyzer for Swedish. */
- SvMicrosoft = "sv.microsoft",
- /** Lucene analyzer for Swedish. */
- SvLucene = "sv.lucene",
- /** Microsoft analyzer for Tamil. */
- TaMicrosoft = "ta.microsoft",
- /** Microsoft analyzer for Telugu. */
- TeMicrosoft = "te.microsoft",
- /** Microsoft analyzer for Thai. */
- ThMicrosoft = "th.microsoft",
- /** Lucene analyzer for Thai. */
- ThLucene = "th.lucene",
- /** Microsoft analyzer for Turkish. */
- TrMicrosoft = "tr.microsoft",
- /** Lucene analyzer for Turkish. */
- TrLucene = "tr.lucene",
- /** Microsoft analyzer for Ukrainian. */
- UkMicrosoft = "uk.microsoft",
- /** Microsoft analyzer for Urdu. */
- UrMicrosoft = "ur.microsoft",
- /** Microsoft analyzer for Vietnamese. */
- ViMicrosoft = "vi.microsoft",
- /** Standard Lucene analyzer. */
- StandardLucene = "standard.lucene",
- /** Standard ASCII Folding Lucene analyzer. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
- StandardAsciiFoldingLucene = "standardasciifolding.lucene",
- /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
- Keyword = "keyword",
- /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
- Pattern = "pattern",
- /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
- Simple = "simple",
- /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
- Stop = "stop",
- /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
- Whitespace = "whitespace"
- }
-
- /** Known values of {@link LexicalNormalizerName} that the service accepts. */
- declare enum KnownLexicalNormalizerName {
- /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
- AsciiFolding = "asciifolding",
- /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
- Elision = "elision",
- /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
- Lowercase = "lowercase",
- /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
- Standard = "standard",
- /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
- Uppercase = "uppercase"
- }
- export { KnownLexicalNormalizerName }
- export { KnownLexicalNormalizerName as KnownNormalizerNames }
-
- /** Known values of {@link LexicalTokenizerName} that the service accepts. */
- export declare enum KnownLexicalTokenizerName {
- /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
- Classic = "classic",
- /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
- EdgeNGram = "edgeNGram",
- /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
- Keyword = "keyword_v2",
- /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
- Letter = "letter",
- /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
- Lowercase = "lowercase",
- /** Divides text using language-specific rules. */
- MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
- /** Divides text using language-specific rules and reduces words to their base forms. */
- MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
- /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
- NGram = "nGram",
- /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
- PathHierarchy = "path_hierarchy_v2",
- /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
- Pattern = "pattern",
- /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
- Standard = "standard_v2",
- /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
- UaxUrlEmail = "uax_url_email",
- /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
2887
- Whitespace = "whitespace"
2888
- }
2889
-
2890
- /** Known values of {@link LineEnding} that the service accepts. */
2891
- export declare enum KnownLineEnding {
2308
+ /** Known values of {@link OcrLineEnding} that the service accepts. */
2309
+ export declare enum KnownOcrLineEnding {
2892
2310
  /** Lines are separated by a single space character. */
2893
2311
  Space = "space",
2894
2312
  /** Lines are separated by a carriage return ('\r') character. */
@@ -3251,170 +2669,6 @@ export declare enum KnownPIIDetectionSkillMaskingMode {
3251
2669
  Replace = "replace"
3252
2670
  }
3253
2671
 
3254
- /** Known values of {@link QueryDebugMode} that the service accepts. */
3255
- export declare enum KnownQueryDebugMode {
3256
- /** No query debugging information will be returned. */
3257
- Disabled = "disabled",
3258
- /** Allows the user to further explore their reranked results. */
3259
- Semantic = "semantic"
3260
- }
3261
-
3262
- /** Known values of {@link QueryLanguage} that the service accepts. */
3263
- export declare enum KnownQueryLanguage {
3264
- /** Query language not specified. */
3265
- None = "none",
3266
- /** Query language value for English (United States). */
3267
- EnUs = "en-us",
3268
- /** Query language value for English (Great Britain). */
3269
- EnGb = "en-gb",
3270
- /** Query language value for English (India). */
3271
- EnIn = "en-in",
3272
- /** Query language value for English (Canada). */
3273
- EnCa = "en-ca",
3274
- /** Query language value for English (Australia). */
3275
- EnAu = "en-au",
3276
- /** Query language value for French (France). */
3277
- FrFr = "fr-fr",
3278
- /** Query language value for French (Canada). */
3279
- FrCa = "fr-ca",
3280
- /** Query language value for German (Germany). */
3281
- DeDe = "de-de",
3282
- /** Query language value for Spanish (Spain). */
3283
- EsEs = "es-es",
3284
- /** Query language value for Spanish (Mexico). */
3285
- EsMx = "es-mx",
3286
- /** Query language value for Chinese (China). */
3287
- ZhCn = "zh-cn",
3288
- /** Query language value for Chinese (Taiwan). */
3289
- ZhTw = "zh-tw",
3290
- /** Query language value for Portuguese (Brazil). */
3291
- PtBr = "pt-br",
3292
- /** Query language value for Portuguese (Portugal). */
3293
- PtPt = "pt-pt",
3294
- /** Query language value for Italian (Italy). */
3295
- ItIt = "it-it",
3296
- /** Query language value for Japanese (Japan). */
3297
- JaJp = "ja-jp",
3298
- /** Query language value for Korean (Korea). */
3299
- KoKr = "ko-kr",
3300
- /** Query language value for Russian (Russia). */
3301
- RuRu = "ru-ru",
3302
- /** Query language value for Czech (Czech Republic). */
3303
- CsCz = "cs-cz",
3304
- /** Query language value for Dutch (Belgium). */
3305
- NlBe = "nl-be",
3306
- /** Query language value for Dutch (Netherlands). */
3307
- NlNl = "nl-nl",
3308
- /** Query language value for Hungarian (Hungary). */
3309
- HuHu = "hu-hu",
3310
- /** Query language value for Polish (Poland). */
3311
- PlPl = "pl-pl",
3312
- /** Query language value for Swedish (Sweden). */
3313
- SvSe = "sv-se",
3314
- /** Query language value for Turkish (Turkey). */
3315
- TrTr = "tr-tr",
3316
- /** Query language value for Hindi (India). */
3317
- HiIn = "hi-in",
3318
- /** Query language value for Arabic (Saudi Arabia). */
3319
- ArSa = "ar-sa",
3320
- /** Query language value for Arabic (Egypt). */
3321
- ArEg = "ar-eg",
3322
- /** Query language value for Arabic (Morocco). */
3323
- ArMa = "ar-ma",
3324
- /** Query language value for Arabic (Kuwait). */
3325
- ArKw = "ar-kw",
3326
- /** Query language value for Arabic (Jordan). */
3327
- ArJo = "ar-jo",
3328
- /** Query language value for Danish (Denmark). */
3329
- DaDk = "da-dk",
3330
- /** Query language value for Norwegian (Norway). */
3331
- NoNo = "no-no",
3332
- /** Query language value for Bulgarian (Bulgaria). */
3333
- BgBg = "bg-bg",
3334
- /** Query language value for Croatian (Croatia). */
3335
- HrHr = "hr-hr",
3336
- /** Query language value for Croatian (Bosnia and Herzegovina). */
3337
- HrBa = "hr-ba",
3338
- /** Query language value for Malay (Malaysia). */
3339
- MsMy = "ms-my",
3340
- /** Query language value for Malay (Brunei Darussalam). */
3341
- MsBn = "ms-bn",
3342
- /** Query language value for Slovenian (Slovenia). */
3343
- SlSl = "sl-sl",
3344
- /** Query language value for Tamil (India). */
3345
- TaIn = "ta-in",
3346
- /** Query language value for Vietnamese (Viet Nam). */
3347
- ViVn = "vi-vn",
3348
- /** Query language value for Greek (Greece). */
3349
- ElGr = "el-gr",
3350
- /** Query language value for Romanian (Romania). */
3351
- RoRo = "ro-ro",
3352
- /** Query language value for Icelandic (Iceland). */
3353
- IsIs = "is-is",
3354
- /** Query language value for Indonesian (Indonesia). */
3355
- IdId = "id-id",
3356
- /** Query language value for Thai (Thailand). */
3357
- ThTh = "th-th",
3358
- /** Query language value for Lithuanian (Lithuania). */
3359
- LtLt = "lt-lt",
3360
- /** Query language value for Ukrainian (Ukraine). */
3361
- UkUa = "uk-ua",
3362
- /** Query language value for Latvian (Latvia). */
3363
- LvLv = "lv-lv",
3364
- /** Query language value for Estonian (Estonia). */
3365
- EtEe = "et-ee",
3366
- /** Query language value for Catalan. */
3367
- CaEs = "ca-es",
3368
- /** Query language value for Finnish (Finland). */
3369
- FiFi = "fi-fi",
3370
- /** Query language value for Serbian (Bosnia and Herzegovina). */
3371
- SrBa = "sr-ba",
3372
- /** Query language value for Serbian (Montenegro). */
3373
- SrMe = "sr-me",
3374
- /** Query language value for Serbian (Serbia). */
3375
- SrRs = "sr-rs",
3376
- /** Query language value for Slovak (Slovakia). */
3377
- SkSk = "sk-sk",
3378
- /** Query language value for Norwegian (Norway). */
3379
- NbNo = "nb-no",
3380
- /** Query language value for Armenian (Armenia). */
3381
- HyAm = "hy-am",
3382
- /** Query language value for Bengali (India). */
3383
- BnIn = "bn-in",
3384
- /** Query language value for Basque. */
3385
- EuEs = "eu-es",
3386
- /** Query language value for Galician. */
3387
- GlEs = "gl-es",
3388
- /** Query language value for Gujarati (India). */
3389
- GuIn = "gu-in",
3390
- /** Query language value for Hebrew (Israel). */
3391
- HeIl = "he-il",
3392
- /** Query language value for Irish (Ireland). */
3393
- GaIe = "ga-ie",
3394
- /** Query language value for Kannada (India). */
3395
- KnIn = "kn-in",
3396
- /** Query language value for Malayalam (India). */
3397
- MlIn = "ml-in",
3398
- /** Query language value for Marathi (India). */
3399
- MrIn = "mr-in",
3400
- /** Query language value for Persian (U.A.E.). */
3401
- FaAe = "fa-ae",
3402
- /** Query language value for Punjabi (India). */
3403
- PaIn = "pa-in",
3404
- /** Query language value for Telugu (India). */
3405
- TeIn = "te-in",
3406
- /** Query language value for Urdu (Pakistan). */
3407
- UrPk = "ur-pk"
3408
- }
3409
-
3410
- /** Known values of {@link QuerySpellerType} that the service accepts. */
3411
- export declare enum KnownQuerySpellerType {
3412
- /** Speller not enabled. */
3413
- None = "none",
3414
- /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
3415
- Lexicon = "lexicon"
3416
- }
3417
-
3418
2672
  /** Known values of {@link RegexFlags} that the service accepts. */
3419
2673
  export declare enum KnownRegexFlags {
3420
2674
  /** Enables canonical equivalence. */
@@ -3453,6 +2707,36 @@ export declare enum KnownSearchAudience {
3453
2707
  AzurePublicCloud = "https://search.azure.com"
3454
2708
  }
3455
2709
 
2710
+ /** Known values of {@link SearchFieldDataType} that the service accepts. */
2711
+ export declare enum KnownSearchFieldDataType {
2712
+ /** Indicates that a field contains a string. */
2713
+ String = "Edm.String",
2714
+ /** Indicates that a field contains a 32-bit signed integer. */
2715
+ Int32 = "Edm.Int32",
2716
+ /** Indicates that a field contains a 64-bit signed integer. */
2717
+ Int64 = "Edm.Int64",
2718
+ /** Indicates that a field contains an IEEE double-precision floating point number. */
2719
+ Double = "Edm.Double",
2720
+ /** Indicates that a field contains a Boolean value (true or false). */
2721
+ Boolean = "Edm.Boolean",
2722
+ /** Indicates that a field contains a date\/time value, including timezone information. */
2723
+ DateTimeOffset = "Edm.DateTimeOffset",
2724
+ /** Indicates that a field contains a geo-location in terms of longitude and latitude. */
2725
+ GeographyPoint = "Edm.GeographyPoint",
2726
+ /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */
2727
+ Complex = "Edm.ComplexType",
2728
+ /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). */
2729
+ Single = "Edm.Single",
2730
+ /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */
2731
+ Half = "Edm.Half",
2732
+ /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */
2733
+ Int16 = "Edm.Int16",
2734
+ /** Indicates that a field contains a 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */
2735
+ SByte = "Edm.SByte",
2736
+ /** Indicates that a field contains a 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). */
2737
+ Byte = "Edm.Byte"
2738
+ }
2739
+
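For reference, the strings above are the values a field's type accepts when an index is defined. A minimal sketch (not part of this diff) of creating an index with these EDM types, assuming the public SearchIndexClient and SimpleField shapes of the 12.x client; the endpoint, key, and index name are placeholders:

  import { AzureKeyCredential, SearchIndexClient } from "@azure/search-documents";

  // Placeholder endpoint and admin key; replace with real service values.
  const indexClient = new SearchIndexClient(
    "https://<service>.search.windows.net",
    new AzureKeyCredential("<admin-key>"),
  );

  async function createHotelsIndex(): Promise<void> {
    await indexClient.createIndex({
      name: "hotels",
      fields: [
        { name: "hotelId", type: "Edm.String", key: true },                       // KnownSearchFieldDataType.String
        { name: "rating", type: "Edm.Double", filterable: true, sortable: true }, // KnownSearchFieldDataType.Double
        { name: "lastRenovated", type: "Edm.DateTimeOffset", sortable: true },    // KnownSearchFieldDataType.DateTimeOffset
      ],
    });
  }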
3456
2740
  /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
3457
2741
  export declare enum KnownSearchIndexerDataSourceType {
3458
2742
  /** Indicates an Azure SQL datasource. */
@@ -3466,9 +2750,7 @@ export declare enum KnownSearchIndexerDataSourceType {
3466
2750
  /** Indicates a MySql datasource. */
3467
2751
  MySql = "mysql",
3468
2752
  /** Indicates an ADLS Gen2 datasource. */
3469
- AdlsGen2 = "adlsgen2",
3470
- /** Indicates a Microsoft Fabric OneLake datasource. */
3471
- OneLake = "onelake"
2753
+ AdlsGen2 = "adlsgen2"
3472
2754
  }
3473
2755
 
3474
2756
  /** Known values of {@link SemanticErrorMode} that the service accepts. */
@@ -3481,7 +2763,7 @@ export declare enum KnownSemanticErrorMode {
3481
2763
 
3482
2764
  /** Known values of {@link SemanticErrorReason} that the service accepts. */
3483
2765
  export declare enum KnownSemanticErrorReason {
3484
- /** If 'semanticMaxWaitInMilliseconds' was set and the semantic processing duration exceeded that value. Only the base results were returned. */
2766
+ /** If `semanticMaxWaitInMilliseconds` was set and the semantic processing duration exceeded that value. Only the base results were returned. */
3485
2767
  MaxWaitExceeded = "maxWaitExceeded",
3486
2768
  /** The request was throttled. Only the base results were returned. */
3487
2769
  CapacityOverloaded = "capacityOverloaded",
@@ -3489,16 +2771,6 @@ export declare enum KnownSemanticErrorReason {
3489
2771
  Transient = "transient"
3490
2772
  }
3491
2773
 
3492
- /** Known values of {@link SemanticFieldState} that the service accepts. */
3493
- export declare enum KnownSemanticFieldState {
3494
- /** The field was fully used for semantic enrichment. */
3495
- Used = "used",
3496
- /** The field was not used for semantic enrichment. */
3497
- Unused = "unused",
3498
- /** The field was partially used for semantic enrichment. */
3499
- Partial = "partial"
3500
- }
3501
-
3502
2774
  /** Known values of {@link SemanticSearchResultsType} that the service accepts. */
3503
2775
  export declare enum KnownSemanticSearchResultsType {
3504
2776
  /** Results without any semantic enrichment or reranking. */
@@ -3541,14 +2813,6 @@ export declare enum KnownSentimentSkillLanguage {
3541
2813
  Tr = "tr"
3542
2814
  }
3543
2815
 
3544
- /** Known values of {@link Speller} that the service accepts. */
3545
- export declare enum KnownSpeller {
3546
- /** Speller not enabled. */
3547
- None = "none",
3548
- /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
3549
- Lexicon = "lexicon"
3550
- }
3551
-
3552
2816
  /** Known values of {@link SplitSkillLanguage} that the service accepts. */
3553
2817
  export declare enum KnownSplitSkillLanguage {
3554
2818
  /** Amharic */
@@ -3776,7 +3040,7 @@ export declare enum KnownTextTranslationSkillLanguage {
3776
3040
  }
3777
3041
 
3778
3042
  /** Known values of {@link TokenFilterName} that the service accepts. */
3779
- export declare enum KnownTokenFilterName {
3043
+ export declare enum KnownTokenFilterNames {
3780
3044
  /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
3781
3045
  ArabicNormalization = "arabic_normalization",
3782
3046
  /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
@@ -3831,7 +3095,7 @@ export declare enum KnownTokenFilterName {
3831
3095
  Snowball = "snowball",
3832
3096
  /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
3833
3097
  SoraniNormalization = "sorani_normalization",
3834
- /** Language specific stemming filter. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
3098
+ /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
3835
3099
  Stemmer = "stemmer",
3836
3100
  /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
3837
3101
  Stopwords = "stopwords",
@@ -3847,257 +3111,33 @@ export declare enum KnownTokenFilterName {
3847
3111
  WordDelimiter = "word_delimiter"
3848
3112
  }
3849
3113
 
3850
- /**
3851
- * Defines values for TokenFilterName.
3852
- * @readonly
3853
- */
3854
- export declare enum KnownTokenFilterNames {
3855
- /**
3856
- * A token filter that applies the Arabic normalizer to normalize the orthography. See
3857
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html
3858
- */
3859
- ArabicNormalization = "arabic_normalization",
3860
- /**
3861
- * Strips all characters after an apostrophe (including the apostrophe itself). See
3862
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html
3863
- */
3864
- Apostrophe = "apostrophe",
3865
- /**
3866
- * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127
3867
- * ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such
3868
- * equivalents exist. See
3869
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html
3870
- */
3871
- AsciiFolding = "asciifolding",
3872
- /**
3873
- * Forms bigrams of CJK terms that are generated from StandardTokenizer. See
3874
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html
3875
- */
3876
- CjkBigram = "cjk_bigram",
3877
- /**
3878
- * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic
3879
- * Latin, and half-width Katakana variants into the equivalent Kana. See
3880
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html
3881
- */
3882
- CjkWidth = "cjk_width",
3883
- /**
3884
- * Removes English possessives, and dots from acronyms. See
3885
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html
3886
- */
3887
- Classic = "classic",
3888
- /**
3889
- * Construct bigrams for frequently occurring terms while indexing. Single terms are still
3890
- * indexed too, with bigrams overlaid. See
3891
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html
3892
- */
3893
- CommonGram = "common_grams",
3894
- /**
3895
- * Generates n-grams of the given size(s) starting from the front or the back of an input token.
3896
- * See
3897
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html
3898
- */
3899
- EdgeNGram = "edgeNGram_v2",
3900
- /**
3901
- * Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See
3902
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html
3903
- */
3904
- Elision = "elision",
3905
- /**
3906
- * Normalizes German characters according to the heuristics of the German2 snowball algorithm.
3907
- * See
3908
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html
3909
- */
3910
- GermanNormalization = "german_normalization",
3911
- /**
3912
- * Normalizes text in Hindi to remove some differences in spelling variations. See
3913
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html
3914
- */
3915
- HindiNormalization = "hindi_normalization",
3916
- /**
3917
- * Normalizes the Unicode representation of text in Indian languages. See
3918
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html
3919
- */
3920
- IndicNormalization = "indic_normalization",
3921
- /**
3922
- * Emits each incoming token twice, once as keyword and once as non-keyword. See
3923
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html
3924
- */
3925
- KeywordRepeat = "keyword_repeat",
3926
- /**
3927
- * A high-performance kstem filter for English. See
3928
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html
3929
- */
3930
- KStem = "kstem",
3931
- /**
3932
- * Removes words that are too long or too short. See
3933
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html
3934
- */
3935
- Length = "length",
3936
- /**
3937
- * Limits the number of tokens while indexing. See
3938
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html
3939
- */
3940
- Limit = "limit",
3941
- /**
3942
- * Normalizes token text to lower case. See
3943
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm
3944
- */
3945
- Lowercase = "lowercase",
3946
- /**
3947
- * Generates n-grams of the given size(s). See
3948
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html
3949
- */
3950
- NGram = "nGram_v2",
3951
- /**
3952
- * Applies normalization for Persian. See
3953
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html
3954
- */
3955
- PersianNormalization = "persian_normalization",
3956
- /**
3957
- * Create tokens for phonetic matches. See
3958
- * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html
3959
- */
3960
- Phonetic = "phonetic",
3961
- /**
3962
- * Uses the Porter stemming algorithm to transform the token stream. See
3963
- * http://tartarus.org/~martin/PorterStemmer
3964
- */
3965
- PorterStem = "porter_stem",
3966
- /**
3967
- * Reverses the token string. See
3968
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html
3969
- */
3970
- Reverse = "reverse",
3971
- /**
3972
- * Normalizes use of the interchangeable Scandinavian characters. See
3973
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html
3974
- */
3975
- ScandinavianNormalization = "scandinavian_normalization",
3976
- /**
3977
- * Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use
3978
- * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See
3979
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html
3980
- */
3981
- ScandinavianFoldingNormalization = "scandinavian_folding",
3982
- /**
3983
- * Creates combinations of tokens as a single token. See
3984
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html
3985
- */
3986
- Shingle = "shingle",
3987
- /**
3988
- * A filter that stems words using a Snowball-generated stemmer. See
3989
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html
3990
- */
3991
- Snowball = "snowball",
3992
- /**
3993
- * Normalizes the Unicode representation of Sorani text. See
3994
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html
3995
- */
3996
- SoraniNormalization = "sorani_normalization",
3997
- /**
3998
- * Language specific stemming filter. See
3999
- * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters
4000
- */
4001
- Stemmer = "stemmer",
4002
- /**
4003
- * Removes stop words from a token stream. See
4004
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html
4005
- */
4006
- Stopwords = "stopwords",
4007
- /**
4008
- * Trims leading and trailing whitespace from tokens. See
4009
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html
4010
- */
4011
- Trim = "trim",
4012
- /**
4013
- * Truncates the terms to a specific length. See
4014
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html
4015
- */
4016
- Truncate = "truncate",
4017
- /**
4018
- * Filters out tokens with same text as the previous token. See
4019
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html
4020
- */
4021
- Unique = "unique",
4022
- /**
4023
- * Normalizes token text to upper case. See
4024
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html
4025
- */
4026
- Uppercase = "uppercase",
4027
- /**
4028
- * Splits words into subwords and performs optional transformations on subword groups.
4029
- */
4030
- WordDelimiter = "word_delimiter"
4031
- }
4032
-
4033
- /**
4034
- * Defines values for TokenizerName.
4035
- * @readonly
4036
- */
3114
+ /** Known values of {@link LexicalTokenizerName} that the service accepts. */
4037
3115
  export declare enum KnownTokenizerNames {
4038
- /**
4039
- * Grammar-based tokenizer that is suitable for processing most European-language documents. See
4040
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html
4041
- */
3116
+ /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
4042
3117
  Classic = "classic",
4043
- /**
4044
- * Tokenizes the input from an edge into n-grams of the given size(s). See
4045
- * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html
4046
- */
3118
+ /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
4047
3119
  EdgeNGram = "edgeNGram",
4048
- /**
4049
- * Emits the entire input as a single token. See
4050
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html
4051
- */
3120
+ /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
4052
3121
  Keyword = "keyword_v2",
4053
- /**
4054
- * Divides text at non-letters. See
4055
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html
4056
- */
3122
+ /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
4057
3123
  Letter = "letter",
4058
- /**
4059
- * Divides text at non-letters and converts them to lower case. See
4060
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html
4061
- */
4062
- Lowercase = "lowercase",
4063
- /**
4064
- * Divides text using language-specific rules.
4065
- */
3124
+ /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
3125
+ Lowercase = "lowercase",
3126
+ /** Divides text using language-specific rules. */
4066
3127
  MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
4067
- /**
4068
- * Divides text using language-specific rules and reduces words to their base forms.
4069
- */
3128
+ /** Divides text using language-specific rules and reduces words to their base forms. */
4070
3129
  MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
4071
- /**
4072
- * Tokenizes the input into n-grams of the given size(s). See
4073
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html
4074
- */
3130
+ /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
4075
3131
  NGram = "nGram",
4076
- /**
4077
- * Tokenizer for path-like hierarchies. See
4078
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html
4079
- */
3132
+ /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
4080
3133
  PathHierarchy = "path_hierarchy_v2",
4081
- /**
4082
- * Tokenizer that uses regex pattern matching to construct distinct tokens. See
4083
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html
4084
- */
3134
+ /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
4085
3135
  Pattern = "pattern",
4086
- /**
4087
- * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop
4088
- * filter. See
4089
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html
4090
- */
3136
+ /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
4091
3137
  Standard = "standard_v2",
4092
- /**
4093
- * Tokenizes urls and emails as one token. See
4094
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html
4095
- */
3138
+ /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
4096
3139
  UaxUrlEmail = "uax_url_email",
4097
- /**
4098
- * Divides text at whitespace. See
4099
- * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html
4100
- */
3140
+ /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
4101
3141
  Whitespace = "whitespace"
4102
3142
  }
4103
3143
 
@@ -4107,26 +3147,52 @@ export declare enum KnownVectorEncodingFormat {
4107
3147
  PackedBit = "packedBit"
4108
3148
  }
4109
3149
 
3150
+ /** Known values of {@link VectorFilterMode} that the service accepts. */
3151
+ export declare enum KnownVectorFilterMode {
3152
+ /** The filter will be applied after the candidate set of vector results is returned. Depending on the filter selectivity, this can result in fewer results than requested by the parameter 'k'. */
3153
+ PostFilter = "postFilter",
3154
+ /** The filter will be applied before the search query. */
3155
+ PreFilter = "preFilter"
3156
+ }
3157
+
4110
3158
  /** Known values of {@link VectorQueryKind} that the service accepts. */
4111
3159
  export declare enum KnownVectorQueryKind {
4112
3160
  /** Vector query where a raw vector value is provided. */
4113
3161
  Vector = "vector",
4114
3162
  /** Vector query where a text value that needs to be vectorized is provided. */
4115
- Text = "text",
4116
- /** Vector query where an url that represents an image value that needs to be vectorized is provided. */
4117
- ImageUrl = "imageUrl",
4118
- /** Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided. */
4119
- ImageBinary = "imageBinary"
3163
+ Text = "text"
3164
+ }
3165
+
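The preview-only imageUrl and imageBinary vector query kinds are dropped, leaving the vector and text kinds. A rough usage sketch tying this to the KnownVectorFilterMode values declared above; it assumes the 12.x search options expose vectorSearchOptions with queries and filterMode, and all service, index, and field names are placeholders:

  import { AzureKeyCredential, SearchClient } from "@azure/search-documents";

  interface Hotel {
    hotelId: string;
    rating: number;
    descriptionVector: number[];
  }

  const searchClient = new SearchClient<Hotel>(
    "https://<service>.search.windows.net",
    "hotels",
    new AzureKeyCredential("<query-key>"),
  );

  async function runVectorQuery(queryEmbedding: number[]): Promise<void> {
    const results = await searchClient.search("*", {
      filter: "rating ge 4",
      vectorSearchOptions: {
        filterMode: "preFilter",            // KnownVectorFilterMode.PreFilter
        queries: [
          {
            kind: "vector",                 // KnownVectorQueryKind.Vector
            vector: queryEmbedding,
            fields: ["descriptionVector"],
            kNearestNeighborsCount: 5,
          },
        ],
      },
    });
    for await (const hit of results.results) {
      console.log(hit.document.hotelId, hit.score);
    }
  }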
3166
+ /** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */
3167
+ export declare enum KnownVectorSearchAlgorithmKind {
3168
+ /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */
3169
+ Hnsw = "hnsw",
3170
+ /** Exhaustive KNN algorithm which will perform brute-force search. */
3171
+ ExhaustiveKnn = "exhaustiveKnn"
3172
+ }
3173
+
3174
+ /** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */
3175
+ export declare enum KnownVectorSearchAlgorithmMetric {
3176
+ /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */
3177
+ Cosine = "cosine",
3178
+ /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */
3179
+ Euclidean = "euclidean",
3180
+ /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */
3181
+ DotProduct = "dotProduct",
3182
+ /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */
3183
+ Hamming = "hamming"
4120
3184
  }
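These algorithm kinds and similarity metrics plug into an index's vectorSearch configuration, where profiles reference an algorithm by name. A hedged sketch follows; the algorithms/profiles layout and the algorithmConfigurationName property are assumed from the 12.x VectorSearch model rather than taken from this diff:

  import type { VectorSearch } from "@azure/search-documents";

  const vectorSearch: VectorSearch = {
    algorithms: [
      {
        name: "hnsw-cosine",
        kind: "hnsw",                       // KnownVectorSearchAlgorithmKind.Hnsw
        parameters: { metric: "cosine" },   // KnownVectorSearchAlgorithmMetric.Cosine
      },
      {
        name: "knn-euclidean",
        kind: "exhaustiveKnn",              // brute-force nearest neighbors
        parameters: { metric: "euclidean" },
      },
    ],
    profiles: [{ name: "vector-profile", algorithmConfigurationName: "hnsw-cosine" }],
  };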
4121
3185
 
4122
3186
  /** Known values of {@link VectorSearchCompressionKind} that the service accepts. */
4123
3187
  export declare enum KnownVectorSearchCompressionKind {
4124
3188
  /** Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */
4125
- ScalarQuantization = "scalarQuantization"
3189
+ ScalarQuantization = "scalarQuantization",
3190
+ /** Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */
3191
+ BinaryQuantization = "binaryQuantization"
4126
3192
  }
4127
3193
 
4128
- /** Known values of {@link VectorSearchCompressionTargetDataType} that the service accepts. */
4129
- export declare enum KnownVectorSearchCompressionTargetDataType {
3194
+ /** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */
3195
+ export declare enum KnownVectorSearchCompressionTarget {
4130
3196
  /** Int8 */
4131
3197
  Int8 = "int8"
4132
3198
  }
@@ -4136,19 +3202,7 @@ export declare enum KnownVectorSearchVectorizerKind {
4136
3202
  /** Generate embeddings using an Azure OpenAI resource at query time. */
4137
3203
  AzureOpenAI = "azureOpenAI",
4138
3204
  /** Generate embeddings using a custom web endpoint at query time. */
4139
- CustomWebApi = "customWebApi",
4140
- /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */
4141
- AIServicesVision = "aiServicesVision",
4142
- /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog at query time. */
4143
- AML = "aml"
4144
- }
4145
-
4146
- /** Known values of {@link VectorThresholdKind} that the service accepts. */
4147
- export declare enum KnownVectorThresholdKind {
4148
- /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
4149
- VectorSimilarity = "vectorSimilarity",
4150
- /** The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. */
4151
- SearchScore = "searchScore"
3205
+ CustomWebApi = "customWebApi"
4152
3206
  }
4153
3207
 
4154
3208
  /** Known values of {@link VisualFeature} that the service accepts. */
@@ -4286,7 +3340,7 @@ export declare type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneS
4286
3340
  * **ur.microsoft**: Microsoft analyzer for Urdu. \
4287
3341
  * **vi.microsoft**: Microsoft analyzer for Vietnamese. \
4288
3342
  * **standard.lucene**: Standard Lucene analyzer. \
4289
- * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\/\/docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers \
3343
+ * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers \
4290
3344
  * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html \
4291
3345
  * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html \
4292
3346
  * **simple**: Divides text at non-letters and converts them to lower case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html \
@@ -4295,24 +3349,6 @@ export declare type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneS
4295
3349
  */
4296
3350
  export declare type LexicalAnalyzerName = string;
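For illustration, these analyzer names are what a string field's analyzerName property accepts. A minimal sketch, assuming the 12.x SimpleField shape (not taken from this diff):

  import type { SearchField } from "@azure/search-documents";

  const descriptionField: SearchField = {
    name: "description",
    type: "Edm.String",
    searchable: true,
    analyzerName: "en.microsoft", // any LexicalAnalyzerName value, e.g. "standard.lucene" or "keyword"
  };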
4297
3351
 
4298
- /**
4299
- * Contains the possible cases for LexicalNormalizer.
4300
- */
4301
- export declare type LexicalNormalizer = CustomNormalizer;
4302
-
4303
- /**
4304
- * Defines values for LexicalNormalizerName. \
4305
- * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,
4306
- * this enum contains the known values that the service supports.
4307
- * ### Known values supported by the service
4308
- * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \
4309
- * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \
4310
- * **lowercase**: Normalizes token text to lowercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
4311
- * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html \
4312
- * **uppercase**: Normalizes token text to uppercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html
4313
- */
4314
- export declare type LexicalNormalizerName = string;
4315
-
4316
3352
  /**
4317
3353
  * Contains the possible cases for Tokenizer.
4318
3354
  */
@@ -4349,23 +3385,6 @@ export declare interface LimitTokenFilter extends BaseTokenFilter {
4349
3385
  consumeAllTokens?: boolean;
4350
3386
  }
4351
3387
 
4352
- /**
4353
- * Defines values for LineEnding. \
4354
- * {@link KnownLineEnding} can be used interchangeably with LineEnding,
4355
- * this enum contains the known values that the service supports.
4356
- * ### Known values supported by the service
4357
- * **space**: Lines are separated by a single space character. \
4358
- * **carriageReturn**: Lines are separated by a carriage return ('\r') character. \
4359
- * **lineFeed**: Lines are separated by a single line feed ('\n') character. \
4360
- * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\r\n') character.
4361
- */
4362
- export declare type LineEnding = string;
4363
-
4364
- /**
4365
- * Options for list aliases operation.
4366
- */
4367
- export declare type ListAliasesOptions = OperationOptions;
4368
-
4369
3388
  /**
4370
3389
  * Options for a list data sources operation.
4371
3390
  */
@@ -4515,12 +3534,6 @@ export declare type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catal
4515
3534
  */
4516
3535
  export declare type NarrowedModel<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends unknown ? true : false ? TModel : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? never : (<T>() => T extends TFields ? true : false) extends <T>() => T extends SelectFields<TModel> ? true : false ? TModel : SearchPick<TModel, TFields>;
4517
3536
 
4518
- /** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
4519
- export declare interface NativeBlobSoftDeleteDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy {
4520
- /** Polymorphic discriminator, which specifies the different types this object can be */
4521
- odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
4522
- }
4523
-
4524
3537
  /**
4525
3538
  * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.
4526
3539
  */
@@ -4559,14 +3572,16 @@ export declare interface NGramTokenizer extends BaseLexicalTokenizer {
4559
3572
  }
4560
3573
 
4561
3574
  /**
4562
- * Specifies the properties for connecting to an AML vectorizer with no authentication.
3575
+ * Defines values for OcrLineEnding. \
3576
+ * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,
3577
+ * this enum contains the known values that the service supports.
3578
+ * ### Known values supported by the service
3579
+ * **space**: Lines are separated by a single space character. \
3580
+ * **carriageReturn**: Lines are separated by a carriage return ('\r') character. \
3581
+ * **lineFeed**: Lines are separated by a single line feed ('\n') character. \
3582
+ * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\r\n') character.
4563
3583
  */
4564
- export declare interface NoAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
4565
- /** Indicates how the service should attempt to identify itself to the AML instance */
4566
- authKind: "none";
4567
- /** The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
4568
- scoringUri: string;
4569
- }
3584
+ export declare type OcrLineEnding = string;
4570
3585
 
4571
3586
  /** A skill that extracts text from image files. */
4572
3587
  export declare interface OcrSkill extends BaseSearchIndexerSkill {
@@ -4578,7 +3593,7 @@ export declare interface OcrSkill extends BaseSearchIndexerSkill {
4578
3593
  shouldDetectOrientation?: boolean;
4579
3594
  }
4580
3595
 
4581
- export declare type OcrSkillLanguage = "af" | "sq" | "anp" | "ar" | "ast" | "awa" | "az" | "bfy" | "eu" | "be" | "be-cyrl" | "be-latn" | "bho" | "bi" | "brx" | "bs" | "bra" | "br" | "bg" | "bns" | "bua" | "ca" | "ceb" | "rab" | "ch" | "hne" | "zh-Hans" | "zh-Hant" | "kw" | "co" | "crh" | "hr" | "cs" | "da" | "prs" | "dhi" | "doi" | "nl" | "en" | "myv" | "et" | "fo" | "fj" | "fil" | "fi" | "fr" | "fur" | "gag" | "gl" | "de" | "gil" | "gon" | "el" | "kl" | "gvr" | "ht" | "hlb" | "hni" | "bgc" | "haw" | "hi" | "mww" | "hoc" | "hu" | "is" | "smn" | "id" | "ia" | "iu" | "ga" | "it" | "ja" | "Jns" | "jv" | "kea" | "kac" | "xnr" | "krc" | "kaa-cyrl" | "kaa" | "csb" | "kk-cyrl" | "kk-latn" | "klr" | "kha" | "quc" | "ko" | "kfq" | "kpy" | "kos" | "kum" | "ku-arab" | "ku-latn" | "kru" | "ky" | "lkt" | "la" | "lt" | "dsb" | "smj" | "lb" | "bfz" | "ms" | "mt" | "kmj" | "gv" | "mi" | "mr" | "mn" | "cnr-cyrl" | "cnr-latn" | "nap" | "ne" | "niu" | "nog" | "sme" | "nb" | "no" | "oc" | "os" | "ps" | "fa" | "pl" | "pt" | "pa" | "ksh" | "ro" | "rm" | "ru" | "sck" | "sm" | "sa" | "sat" | "sco" | "gd" | "sr" | "sr-Cyrl" | "sr-Latn" | "xsr" | "srx" | "sms" | "sk" | "sl" | "so" | "sma" | "es" | "sw" | "sv" | "tg" | "tt" | "tet" | "thf" | "to" | "tr" | "tk" | "tyv" | "hsb" | "ur" | "ug" | "uz-arab" | "uz-cyrl" | "uz" | "vo" | "wae" | "cy" | "fy" | "yua" | "za" | "zu" | "unk";
3596
+ export declare type OcrSkillLanguage = `${KnownOcrSkillLanguage}`;
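A hedged sketch of an OCR skill using one of the line-ending values above; it assumes OcrSkill keeps the lineEnding, defaultLanguageCode, inputs, and outputs properties of the 12.x model, and the context paths and output names are placeholders:

  import type { OcrSkill } from "@azure/search-documents";

  const ocrSkill: OcrSkill = {
    odatatype: "#Microsoft.Skills.Vision.OcrSkill",
    context: "/document/normalized_images/*",
    defaultLanguageCode: "en",          // an OcrSkillLanguage / KnownOcrSkillLanguage value
    shouldDetectOrientation: true,
    lineEnding: "space",                // KnownOcrLineEnding.Space
    inputs: [{ name: "image", source: "/document/normalized_images/*" }],
    outputs: [{ name: "text", targetName: "extractedText" }],
  };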
4582
3597
 
4583
3598
  /**
4584
3599
  * Escapes an odata filter expression to avoid errors with quoting string literals.
@@ -4748,7 +3763,7 @@ export declare interface PIIDetectionSkill extends BaseSearchIndexerSkill {
4748
3763
  domain?: string;
4749
3764
  }
4750
3765
 
4751
- export declare type PIIDetectionSkillMaskingMode = "none" | "replace";
3766
+ export declare type PIIDetectionSkillMaskingMode = `${KnownPIIDetectionSkillMaskingMode}`;
4752
3767
 
4753
3768
  /**
4754
3769
  * A value that specifies whether answers should be returned as part of the search response.
@@ -4791,7 +3806,7 @@ export declare interface QueryAnswerResult {
4791
3806
  */
4792
3807
  export declare type QueryCaption = ExtractiveQueryCaption;
4793
3808
 
4794
- /** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'.. */
3809
+ /** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type `semantic`. */
4795
3810
  export declare interface QueryCaptionResult {
4796
3811
  /** Describes unknown properties. The value of an unknown property can be of "any" type. */
4797
3812
  [property: string]: any;
@@ -4807,169 +3822,16 @@ export declare interface QueryCaptionResult {
4807
3822
  readonly highlights?: string;
4808
3823
  }
4809
3824
 
4810
- /**
4811
- * Defines values for QueryDebugMode. \
4812
- * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode,
4813
- * this enum contains the known values that the service supports.
4814
- * ### Known values supported by the service
4815
- * **disabled**: No query debugging information will be returned. \
4816
- * **semantic**: Allows the user to further explore their reranked results.
4817
- */
4818
- export declare type QueryDebugMode = string;
4819
-
4820
- /**
4821
- * Defines values for QueryLanguage. \
4822
- * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage,
4823
- * this enum contains the known values that the service supports.
4824
- * ### Known values supported by the service
4825
- * **none**: Query language not specified. \
4826
- * **en-us**: Query language value for English (United States). \
4827
- * **en-gb**: Query language value for English (Great Britain). \
4828
- * **en-in**: Query language value for English (India). \
4829
- * **en-ca**: Query language value for English (Canada). \
4830
- * **en-au**: Query language value for English (Australia). \
4831
- * **fr-fr**: Query language value for French (France). \
4832
- * **fr-ca**: Query language value for French (Canada). \
4833
- * **de-de**: Query language value for German (Germany). \
4834
- * **es-es**: Query language value for Spanish (Spain). \
4835
- * **es-mx**: Query language value for Spanish (Mexico). \
4836
- * **zh-cn**: Query language value for Chinese (China). \
4837
- * **zh-tw**: Query language value for Chinese (Taiwan). \
4838
- * **pt-br**: Query language value for Portuguese (Brazil). \
4839
- * **pt-pt**: Query language value for Portuguese (Portugal). \
4840
- * **it-it**: Query language value for Italian (Italy). \
4841
- * **ja-jp**: Query language value for Japanese (Japan). \
4842
- * **ko-kr**: Query language value for Korean (Korea). \
4843
- * **ru-ru**: Query language value for Russian (Russia). \
4844
- * **cs-cz**: Query language value for Czech (Czech Republic). \
4845
- * **nl-be**: Query language value for Dutch (Belgium). \
4846
- * **nl-nl**: Query language value for Dutch (Netherlands). \
4847
- * **hu-hu**: Query language value for Hungarian (Hungary). \
4848
- * **pl-pl**: Query language value for Polish (Poland). \
4849
- * **sv-se**: Query language value for Swedish (Sweden). \
4850
- * **tr-tr**: Query language value for Turkish (Turkey). \
4851
- * **hi-in**: Query language value for Hindi (India). \
4852
- * **ar-sa**: Query language value for Arabic (Saudi Arabia). \
4853
- * **ar-eg**: Query language value for Arabic (Egypt). \
4854
- * **ar-ma**: Query language value for Arabic (Morocco). \
4855
- * **ar-kw**: Query language value for Arabic (Kuwait). \
4856
- * **ar-jo**: Query language value for Arabic (Jordan). \
4857
- * **da-dk**: Query language value for Danish (Denmark). \
4858
- * **no-no**: Query language value for Norwegian (Norway). \
4859
- * **bg-bg**: Query language value for Bulgarian (Bulgaria). \
4860
- * **hr-hr**: Query language value for Croatian (Croatia). \
4861
- * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \
4862
- * **ms-my**: Query language value for Malay (Malaysia). \
4863
- * **ms-bn**: Query language value for Malay (Brunei Darussalam). \
4864
- * **sl-sl**: Query language value for Slovenian (Slovenia). \
4865
- * **ta-in**: Query language value for Tamil (India). \
4866
- * **vi-vn**: Query language value for Vietnamese (Viet Nam). \
4867
- * **el-gr**: Query language value for Greek (Greece). \
4868
- * **ro-ro**: Query language value for Romanian (Romania). \
4869
- * **is-is**: Query language value for Icelandic (Iceland). \
4870
- * **id-id**: Query language value for Indonesian (Indonesia). \
4871
- * **th-th**: Query language value for Thai (Thailand). \
4872
- * **lt-lt**: Query language value for Lithuanian (Lithuania). \
4873
- * **uk-ua**: Query language value for Ukrainian (Ukraine). \
4874
- * **lv-lv**: Query language value for Latvian (Latvia). \
4875
- * **et-ee**: Query language value for Estonian (Estonia). \
4876
- * **ca-es**: Query language value for Catalan. \
4877
- * **fi-fi**: Query language value for Finnish (Finland). \
4878
- * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \
4879
- * **sr-me**: Query language value for Serbian (Montenegro). \
4880
- * **sr-rs**: Query language value for Serbian (Serbia). \
4881
- * **sk-sk**: Query language value for Slovak (Slovakia). \
4882
- * **nb-no**: Query language value for Norwegian (Norway). \
4883
- * **hy-am**: Query language value for Armenian (Armenia). \
4884
- * **bn-in**: Query language value for Bengali (India). \
4885
- * **eu-es**: Query language value for Basque. \
4886
- * **gl-es**: Query language value for Galician. \
4887
- * **gu-in**: Query language value for Gujarati (India). \
4888
- * **he-il**: Query language value for Hebrew (Israel). \
4889
- * **ga-ie**: Query language value for Irish (Ireland). \
4890
- * **kn-in**: Query language value for Kannada (India). \
4891
- * **ml-in**: Query language value for Malayalam (India). \
4892
- * **mr-in**: Query language value for Marathi (India). \
4893
- * **fa-ae**: Query language value for Persian (U.A.E.). \
4894
- * **pa-in**: Query language value for Punjabi (India). \
4895
- * **te-in**: Query language value for Telugu (India). \
4896
- * **ur-pk**: Query language value for Urdu (Pakistan).
4897
- */
4898
- export declare type QueryLanguage = string;
4899
-
4900
- /** The raw concatenated strings that were sent to the semantic enrichment process. */
4901
- export declare interface QueryResultDocumentRerankerInput {
4902
- /**
4903
- * The raw string for the title field that was used for semantic enrichment.
4904
- * NOTE: This property will not be serialized. It can only be populated by the server.
4905
- */
4906
- readonly title?: string;
4907
- /**
4908
- * The raw concatenated strings for the content fields that were used for semantic enrichment.
4909
- * NOTE: This property will not be serialized. It can only be populated by the server.
4910
- */
4911
- readonly content?: string;
4912
- /**
4913
- * The raw concatenated strings for the keyword fields that were used for semantic enrichment.
4914
- * NOTE: This property will not be serialized. It can only be populated by the server.
4915
- */
4916
- readonly keywords?: string;
4917
- }
4918
-
4919
- /** Description of fields that were sent to the semantic enrichment process, as well as how they were used */
4920
- export declare interface QueryResultDocumentSemanticField {
4921
- /**
4922
- * The name of the field that was sent to the semantic enrichment process
4923
- * NOTE: This property will not be serialized. It can only be populated by the server.
4924
- */
4925
- readonly name?: string;
4926
- /**
4927
- * The way the field was used for the semantic enrichment process (fully used, partially used, or unused)
4928
- * NOTE: This property will not be serialized. It can only be populated by the server.
4929
- */
4930
- readonly state?: SemanticFieldState;
4931
- }
4932
-
4933
- /**
4934
- * Defines values for QuerySpellerType. \
4935
- * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType,
4936
- * this enum contains the known values that the service supports.
4937
- * ### Known values supported by the service
4938
- * **none**: Speller not enabled. \
4939
- * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
4940
- */
4941
- export declare type QuerySpellerType = string;
4942
-
4943
3825
  /** Defines values for QueryType. */
4944
3826
  export declare type QueryType = "simple" | "full" | "semantic";
4945
3827
 
4946
- export declare type RegexFlags = "CANON_EQ" | "CASE_INSENSITIVE" | "COMMENTS" | "DOTALL" | "LITERAL" | "MULTILINE" | "UNICODE_CASE" | "UNIX_LINES";
4947
-
4948
- /**
4949
- * Options for reset docs operation.
4950
- */
4951
- export declare interface ResetDocumentsOptions extends OperationOptions {
4952
- /** document keys to be reset */
4953
- documentKeys?: string[];
4954
- /** datasource document identifiers to be reset */
4955
- datasourceDocumentIds?: string[];
4956
- /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */
4957
- overwrite?: boolean;
4958
- }
3828
+ export declare type RegexFlags = `${KnownRegexFlags}`;
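
Several string unions here (RegexFlags above, and later SemanticErrorMode, SentimentSkillLanguage, TextSplitMode, and others) are now derived from their corresponding Known* enums through a template-literal type instead of a hand-written list of literals. A minimal sketch of how that pattern behaves, using a hypothetical stand-in enum rather than the package's own KnownRegexFlags:

    // Hypothetical stand-in for one of the package's Known* string enums.
    enum KnownExampleFlags {
      CaseInsensitive = "CASE_INSENSITIVE",
      Multiline = "MULTILINE",
    }

    // `${KnownExampleFlags}` distributes over the enum's members, producing the
    // union of its string values, so the alias tracks the enum automatically.
    type ExampleFlags = `${KnownExampleFlags}`; // "CASE_INSENSITIVE" | "MULTILINE"

    const flags: ExampleFlags = "MULTILINE"; // plain string literals still type-check
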
4959
3829
 
4960
3830
  /**
4961
3831
  * Options for reset indexer operation.
4962
3832
  */
4963
3833
  export declare type ResetIndexerOptions = OperationOptions;
4964
3834
 
4965
- /**
4966
- * Options for reset skills operation.
4967
- */
4968
- export declare interface ResetSkillsOptions extends OperationOptions {
4969
- /** the names of skills to be reset. */
4970
- skillNames?: string[];
4971
- }
4972
-
4973
3835
  /** Represents a resource's usage and quota. */
4974
3836
  export declare interface ResourceCounter {
4975
3837
  /** The resource usage amount. */
@@ -4984,7 +3846,7 @@ export declare interface ResourceCounter {
4984
3846
  export declare type RunIndexerOptions = OperationOptions;
4985
3847
 
4986
3848
  /** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */
4987
- export declare interface ScalarQuantizationCompressionConfiguration extends BaseVectorSearchCompressionConfiguration {
3849
+ export declare interface ScalarQuantizationCompression extends BaseVectorSearchCompression {
4988
3850
  /** Polymorphic discriminator, which specifies the different types this object can be */
4989
3851
  kind: "scalarQuantization";
4990
3852
  /** Contains the parameters specific to Scalar Quantization. */
@@ -4994,7 +3856,7 @@ export declare interface ScalarQuantizationCompressionConfiguration extends Base
4994
3856
  /** Contains the parameters specific to Scalar Quantization. */
4995
3857
  export declare interface ScalarQuantizationParameters {
4996
3858
  /** The quantized data type of compressed vector values. */
4997
- quantizedDataType?: VectorSearchCompressionTargetDataType;
3859
+ quantizedDataType?: VectorSearchCompressionTarget;
4998
3860
  }
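
ScalarQuantizationCompressionConfiguration is renamed to ScalarQuantizationCompression and its target type to VectorSearchCompressionTarget. A minimal sketch of the discriminated shape, limited to the members visible in this hunk (the base type's naming property is not shown here, so Pick keeps the sketch compilable):

    import type { ScalarQuantizationCompression } from "@azure/search-documents";

    // Only the discriminator and parameters appear in this hunk.
    const scalar: Pick<ScalarQuantizationCompression, "kind" | "parameters"> = {
      kind: "scalarQuantization",
      parameters: { quantizedDataType: "int8" }, // VectorSearchCompressionTarget, e.g. "int8"
    };
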
4999
3861
 
5000
3862
  /**
@@ -5035,16 +3897,6 @@ export declare interface ScoringProfile {
5035
3897
  /** Defines values for ScoringStatistics. */
5036
3898
  export declare type ScoringStatistics = "local" | "global";
5037
3899
 
5038
- /** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */
5039
- export declare interface SearchAlias {
5040
- /** The name of the alias. */
5041
- name: string;
5042
- /** The name of the index this alias maps to. Only one index name may be specified. */
5043
- indexes: string[];
5044
- /** The ETag of the alias. */
5045
- etag?: string;
5046
- }
5047
-
5048
3900
  /**
5049
3901
  * Class used to perform operations against a search index,
5050
3902
  * including querying documents in the index as well as
@@ -5073,10 +3925,6 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
5073
3925
  * A reference to the auto-generated SearchClient
5074
3926
  */
5075
3927
  private readonly client;
5076
- /**
5077
- * A reference to the internal HTTP pipeline for use with raw requests
5078
- */
5079
- readonly pipeline: Pipeline;
5080
3928
  /**
5081
3929
  * Creates an instance of SearchClient.
5082
3930
  *
@@ -5283,7 +4131,6 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
5283
4131
  private convertSelect;
5284
4132
  private convertVectorQueryFields;
5285
4133
  private convertSearchFields;
5286
- private convertSemanticFields;
5287
4134
  private convertOrderBy;
5288
4135
  private convertQueryAnswers;
5289
4136
  private convertQueryCaptions;
@@ -5396,16 +4243,44 @@ export declare type SearchFieldArray<TModel extends object = object> = (<T>() =>
5396
4243
 
5397
4244
  /**
5398
4245
  * Defines values for SearchFieldDataType.
5399
- * Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean',
5400
- * 'Edm.DateTimeOffset', 'Edm.GeographyPoint', 'Collection(Edm.String)', 'Collection(Edm.Int32)',
5401
- * 'Collection(Edm.Int64)', 'Collection(Edm.Double)', 'Collection(Edm.Boolean)',
5402
- * 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)', 'Collection(Edm.Single)',
5403
- * 'Collection(Edm.Half)', 'Collection(Edm.Int16)', 'Collection(Edm.SByte)'
5404
4246
  *
5405
- * NB: `Edm.Single` alone is not a valid data type. It must be used as part of a collection type.
5406
- * @readonly
4247
+ * ### Known values supported by the service:
4248
+ *
4249
+ * **Edm.String**: Indicates that a field contains a string.
4250
+ *
4251
+ * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer.
4252
+ *
4253
+ * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer.
4254
+ *
4255
+ * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number.
4256
+ *
4257
+ * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false).
4258
+ *
4259
+ * **Edm.DateTimeOffset**: Indicates that a field contains a date/time value, including timezone
4260
+ * information.
4261
+ *
4262
+ * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and
4263
+ * latitude.
4264
+ *
4265
+ * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn
4266
+ * have sub-fields of other types.
4267
+ *
4268
+ * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is
4269
+ * only valid when used as part of a collection type, i.e. Collection(Edm.Single).
4270
+ *
4271
+ * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is
4272
+ * only valid when used as part of a collection type, i.e. Collection(Edm.Half).
4273
+ *
4274
+ * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when
4275
+ * used as part of a collection type, i.e. Collection(Edm.Int16).
4276
+ *
4277
+ * **Edm.SByte**: Indicates that a field contains an 8-bit signed integer. This is only valid when
4278
+ * used as part of a collection type, i.e. Collection(Edm.SByte).
4279
+ *
4280
+ * **Edm.Byte**: Indicates that a field contains an 8-bit unsigned integer. This is only valid when
4281
+ * used as part of a collection type, i.e. Collection(Edm.Byte).
5407
4282
  */
5408
- export declare type SearchFieldDataType = "Edm.String" | "Edm.Int32" | "Edm.Int64" | "Edm.Double" | "Edm.Boolean" | "Edm.DateTimeOffset" | "Edm.GeographyPoint" | "Collection(Edm.String)" | "Collection(Edm.Int32)" | "Collection(Edm.Int64)" | "Collection(Edm.Double)" | "Collection(Edm.Boolean)" | "Collection(Edm.DateTimeOffset)" | "Collection(Edm.GeographyPoint)" | "Collection(Edm.Single)" | "Collection(Edm.Half)" | "Collection(Edm.Int16)" | "Collection(Edm.SByte)" | "Collection(Edm.Byte)";
4283
+ export declare type SearchFieldDataType = Exclude<`${KnownSearchFieldDataType}` | `Collection(${KnownSearchFieldDataType})`, "Edm.ComplexType" | "Edm.Byte" | "Edm.Half" | "Edm.Int16" | "Edm.SByte" | "Edm.Single">;
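
The union is now computed from KnownSearchFieldDataType: every base EDM value plus its Collection(...) form, minus Edm.ComplexType and the values that are only legal inside a collection. A short sketch of what does and does not type-check under the new definition:

    import type { SearchFieldDataType } from "@azure/search-documents";

    const text: SearchFieldDataType = "Edm.String";
    const vector: SearchFieldDataType = "Collection(Edm.Single)"; // collection forms are allowed
    // const invalid: SearchFieldDataType = "Edm.Single"; // rejected: only valid inside a collection
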
5409
4284
 
5410
4285
  /**
5411
4286
  * Represents a search index definition, which describes the fields and search behavior of an
@@ -5454,10 +4329,6 @@ export declare interface SearchIndex {
5454
4329
  * The character filters for the index.
5455
4330
  */
5456
4331
  charFilters?: CharFilter[];
5457
- /**
5458
- * The normalizers for the index.
5459
- */
5460
- normalizers?: LexicalNormalizer[];
5461
4332
  /**
5462
4333
  * A description of an encryption key that you create in Azure Key Vault. This key is used to
5463
4334
  * provide an additional level of encryption-at-rest for your data when you want full assurance
@@ -5489,11 +4360,6 @@ export declare interface SearchIndex {
5489
4360
  etag?: string;
5490
4361
  }
5491
4362
 
5492
- /**
5493
- * Search Alias object.
5494
- */
5495
- export declare type SearchIndexAlias = SearchAlias;
5496
-
5497
4363
  /**
5498
4364
  * Class to perform operations to manage
5499
4365
  * (create, update, list/delete)
@@ -5518,10 +4384,6 @@ export declare class SearchIndexClient {
5518
4384
  * A reference to the auto-generated SearchServiceClient
5519
4385
  */
5520
4386
  private readonly client;
5521
- /**
5522
- * A reference to the internal HTTP pipeline for use with raw requests
5523
- */
5524
- readonly pipeline: Pipeline;
5525
4387
  /**
5526
4388
  * Used to authenticate requests to the service.
5527
4389
  */
@@ -5554,13 +4416,6 @@ export declare class SearchIndexClient {
5554
4416
  * @param options - Options to the list index operation.
5555
4417
  */
5556
4418
  listIndexes(options?: ListIndexesOptions): IndexIterator;
5557
- private listAliasesPage;
5558
- private listAliasesAll;
5559
- /**
5560
- * Lists all aliases available for a search service.
5561
- * @param options - The options parameters.
5562
- */
5563
- listAliases(options?: ListAliasesOptions): AliasIterator;
5564
4419
  private listIndexesNamesPage;
5565
4420
  private listIndexesNamesAll;
5566
4421
  /**
@@ -5626,31 +4481,6 @@ export declare class SearchIndexClient {
5626
4481
  * @param options - Additional optional arguments.
5627
4482
  */
5628
4483
  deleteSynonymMap(synonymMap: string | SynonymMap, options?: DeleteSynonymMapOptions): Promise<void>;
5629
- /**
5630
- * Creates a new search alias or updates an alias if it already exists.
5631
- * @param alias - The definition of the alias to create or update.
5632
- * @param options - The options parameters.
5633
- */
5634
- createOrUpdateAlias(alias: SearchIndexAlias, options?: CreateOrUpdateAliasOptions): Promise<SearchIndexAlias>;
5635
- /**
5636
- * Creates a new search alias.
5637
- * @param alias - The definition of the alias to create.
5638
- * @param options - The options parameters.
5639
- */
5640
- createAlias(alias: SearchIndexAlias, options?: CreateAliasOptions): Promise<SearchIndexAlias>;
5641
- /**
5642
- * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no
5643
- * recovery option. The mapped index is untouched by this operation.
5644
- * @param alias - Alias/Name name of the alias to delete.
5645
- * @param options - The options parameters.
5646
- */
5647
- deleteAlias(alias: string | SearchIndexAlias, options?: DeleteAliasOptions): Promise<void>;
5648
- /**
5649
- * Retrieves an alias definition.
5650
- * @param aliasName - The name of the alias to retrieve.
5651
- * @param options - The options parameters.
5652
- */
5653
- getAlias(aliasName: string, options?: GetAliasOptions): Promise<SearchIndexAlias>;
5654
4484
  /**
5655
4485
  * Retrieves statistics about an index, such as the count of documents and the size
5656
4486
  * of index storage.
@@ -5764,29 +4594,6 @@ export declare interface SearchIndexer {
5764
4594
  * paid services created on or after January 1, 2019.
5765
4595
  */
5766
4596
  encryptionKey?: SearchResourceEncryptionKey;
5767
- /**
5768
- * Adds caching to an enrichment pipeline to allow for incremental modification steps without
5769
- * having to rebuild the index every time.
5770
- */
5771
- cache?: SearchIndexerCache;
5772
- }
5773
-
5774
- export declare interface SearchIndexerCache {
5775
- /**
5776
- * The connection string to the storage account where the cache data will be persisted.
5777
- */
5778
- storageConnectionString?: string;
5779
- /**
5780
- * Specifies whether incremental reprocessing is enabled.
5781
- */
5782
- enableReprocessing?: boolean;
5783
- /** The user-assigned managed identity used for connections to the enrichment cache. If the
5784
- * connection string indicates an identity (ResourceId) and it's not specified, the
5785
- * system-assigned managed identity is used. On updates to the indexer, if the identity is
5786
- * unspecified, the value remains unchanged. If set to "none", the value of this property is
5787
- * cleared.
5788
- */
5789
- identity?: SearchIndexerDataIdentity;
5790
4597
  }
5791
4598
 
5792
4599
  /**
@@ -5813,10 +4620,6 @@ export declare class SearchIndexerClient {
5813
4620
  * A reference to the auto-generated SearchServiceClient
5814
4621
  */
5815
4622
  private readonly client;
5816
- /**
5817
- * A reference to the internal HTTP pipeline for use with raw requests
5818
- */
5819
- readonly pipeline: Pipeline;
5820
4623
  /**
5821
4624
  * Creates an instance of SearchIndexerClient.
5822
4625
  *
@@ -5954,19 +4757,6 @@ export declare class SearchIndexerClient {
5954
4757
  * @param options - Additional optional arguments.
5955
4758
  */
5956
4759
  runIndexer(indexerName: string, options?: RunIndexerOptions): Promise<void>;
5957
- /**
5958
- * Resets specific documents in the datasource to be selectively re-ingested by the indexer.
5959
- * @param indexerName - The name of the indexer to reset documents for.
5960
- * @param options - Additional optional arguments.
5961
- */
5962
- resetDocuments(indexerName: string, options?: ResetDocumentsOptions): Promise<void>;
5963
- /**
5964
- * Reset an existing skillset in a search service.
5965
- * @param skillsetName - The name of the skillset to reset.
5966
- * @param skillNames - The names of skills to reset.
5967
- * @param options - The options parameters.
5968
- */
5969
- resetSkills(skillsetName: string, options?: ResetSkillsOptions): Promise<void>;
5970
4760
  }
5971
4761
 
5972
4762
  /**
@@ -6066,14 +4856,14 @@ export declare interface SearchIndexerDataSourceConnection {
6066
4856
  encryptionKey?: SearchResourceEncryptionKey;
6067
4857
  }
6068
4858
 
6069
- export declare type SearchIndexerDataSourceType = "azuresql" | "cosmosdb" | "azureblob" | "azuretable" | "mysql" | "adlsgen2" | "onelake";
4859
+ export declare type SearchIndexerDataSourceType = `${KnownSearchIndexerDataSourceType}`;
6070
4860
 
6071
4861
  /** Specifies the identity for a datasource to use. */
6072
4862
  export declare interface SearchIndexerDataUserAssignedIdentity extends BaseSearchIndexerDataIdentity {
6073
4863
  /** Polymorphic discriminator, which specifies the different types this object can be */
6074
4864
  odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity";
6075
4865
  /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */
6076
- userAssignedIdentity: string;
4866
+ resourceId: string;
6077
4867
  }
6078
4868
 
6079
4869
  /** Represents an item- or document-level indexing error. */
@@ -6111,11 +4901,19 @@ export declare interface SearchIndexerError {
6111
4901
  }
6112
4902
 
6113
4903
  /** Definition of additional projections to secondary search indexes. */
6114
- export declare interface SearchIndexerIndexProjections {
4904
+ export declare interface SearchIndexerIndexProjection {
6115
4905
  /** A list of projections to be performed to secondary search indexes. */
6116
4906
  selectors: SearchIndexerIndexProjectionSelector[];
6117
4907
  /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
6118
- parameters?: SearchIndexerIndexProjectionsParameters;
4908
+ parameters?: SearchIndexerIndexProjectionParameters;
4909
+ }
4910
+
4911
+ /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
4912
+ export declare interface SearchIndexerIndexProjectionParameters {
4913
+ /** Describes unknown properties.*/
4914
+ [property: string]: unknown;
4915
+ /** Defines behavior of the index projections in relation to the rest of the indexer. */
4916
+ projectionMode?: IndexProjectionMode;
6119
4917
  }
6120
4918
 
6121
4919
  /** Description for what data to store in the designated search index. */
@@ -6130,14 +4928,6 @@ export declare interface SearchIndexerIndexProjectionSelector {
6130
4928
  mappings: InputFieldMappingEntry[];
6131
4929
  }
6132
4930
 
6133
- /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
6134
- export declare interface SearchIndexerIndexProjectionsParameters {
6135
- /** Describes unknown properties.*/
6136
- [property: string]: unknown;
6137
- /** Defines behavior of the index projections in relation to the rest of the indexer. */
6138
- projectionMode?: IndexProjectionMode;
6139
- }
6140
-
6141
4931
  /**
6142
4932
  * Definition of additional projections to azure blob, table, or files, of enriched data.
6143
4933
  */
@@ -6233,7 +5023,7 @@ export declare interface SearchIndexerLimits {
6233
5023
  /**
6234
5024
  * Contains the possible cases for Skill.
6235
5025
  */
6236
- export declare type SearchIndexerSkill = AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | VisionVectorizeSkill | WebApiSkill;
5026
+ export declare type SearchIndexerSkill = AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill;
6237
5027
 
6238
5028
  /**
6239
5029
  * A list of skills.
@@ -6262,7 +5052,7 @@ export declare interface SearchIndexerSkillset {
6262
5052
  /**
6263
5053
  * Definition of additional projections to secondary search index(es).
6264
5054
  */
6265
- indexProjections?: SearchIndexerIndexProjections;
5055
+ indexProjection?: SearchIndexerIndexProjection;
6266
5056
  /**
6267
5057
  * The ETag of the skillset.
6268
5058
  */
@@ -6688,21 +5478,8 @@ export declare type SearchResult<TModel extends object, TFields extends SelectFi
6688
5478
  */
6689
5479
  readonly captions?: QueryCaptionResult[];
6690
5480
  document: NarrowedModel<TModel, TFields>;
6691
- /**
6692
- * Contains debugging information that can be used to further explore your search results.
6693
- * NOTE: This property will not be serialized. It can only be populated by the server.
6694
- */
6695
- readonly documentDebugInfo?: DocumentDebugInfo[];
6696
5481
  };
6697
5482
 
6698
- /** The results of the vector query will filter based on the '\@search.score' value. Note this is the \@search.score returned as part of the search response. The threshold direction will be chosen for higher \@search.score. */
6699
- export declare interface SearchScoreThreshold extends BaseVectorThreshold {
6700
- /** Polymorphic discriminator, which specifies the different types this object can be */
6701
- kind: "searchScore";
6702
- /** The threshold will filter based on the '\@search.score' value. Note this is the \@search.score returned as part of the search response. The threshold direction will be chosen for higher \@search.score. */
6703
- value: number;
6704
- }
6705
-
6706
5483
  /**
6707
5484
  * Response from a get service statistics request. If successful, it includes service level
6708
5485
  * counters and limits.
@@ -6750,52 +5527,15 @@ export declare interface SemanticConfiguration {
6750
5527
  prioritizedFields: SemanticPrioritizedFields;
6751
5528
  }
6752
5529
 
6753
- /**
6754
- * Debug options for semantic search queries.
6755
- */
6756
- export declare interface SemanticDebugInfo {
6757
- /**
6758
- * The title field that was sent to the semantic enrichment process, as well as how it was used
6759
- * NOTE: This property will not be serialized. It can only be populated by the server.
6760
- */
6761
- readonly titleField?: QueryResultDocumentSemanticField;
6762
- /**
6763
- * The content fields that were sent to the semantic enrichment process, as well as how they were used
6764
- * NOTE: This property will not be serialized. It can only be populated by the server.
6765
- */
6766
- readonly contentFields?: QueryResultDocumentSemanticField[];
6767
- /**
6768
- * The keyword fields that were sent to the semantic enrichment process, as well as how they were used
6769
- * NOTE: This property will not be serialized. It can only be populated by the server.
6770
- */
6771
- readonly keywordFields?: QueryResultDocumentSemanticField[];
6772
- /**
6773
- * The raw concatenated strings that were sent to the semantic enrichment process.
6774
- * NOTE: This property will not be serialized. It can only be populated by the server.
6775
- */
6776
- readonly rerankerInput?: QueryResultDocumentRerankerInput;
6777
- }
6778
-
6779
- export declare type SemanticErrorMode = "partial" | "fail";
5530
+ export declare type SemanticErrorMode = `${KnownSemanticErrorMode}`;
6780
5531
 
6781
- export declare type SemanticErrorReason = "maxWaitExceeded" | "capacityOverloaded" | "transient";
5532
+ export declare type SemanticErrorReason = `${KnownSemanticErrorReason}`;
6782
5533
 
6783
5534
  /** A field that is used as part of the semantic configuration. */
6784
5535
  export declare interface SemanticField {
6785
5536
  name: string;
6786
5537
  }
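
For context on how SemanticField feeds into a configuration: a sketch of a SemanticConfiguration, assuming the usual titleField/contentFields/keywordsFields members of SemanticPrioritizedFields (only the start of that interface is visible in this diff):

    import type { SemanticConfiguration } from "@azure/search-documents";

    const semanticConfig: SemanticConfiguration = {
      name: "default-semantic-config",
      prioritizedFields: {
        titleField: { name: "title" },            // each entry is a SemanticField
        contentFields: [{ name: "description" }], // assumed member name
        keywordsFields: [{ name: "tags" }],       // assumed member name
      },
    };
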
6787
5538
 
6788
- /**
6789
- * Defines values for SemanticFieldState. \
6790
- * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState,
6791
- * this enum contains the known values that the service supports.
6792
- * ### Known values supported by the service
6793
- * **used**: The field was fully used for semantic enrichment. \
6794
- * **unused**: The field was not used for semantic enrichment. \
6795
- * **partial**: The field was partially used for semantic enrichment.
6796
- */
6797
- export declare type SemanticFieldState = string;
6798
-
6799
5539
  /** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
6800
5540
  export declare interface SemanticPrioritizedFields {
6801
5541
  /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
@@ -6849,17 +5589,9 @@ export declare interface SemanticSearchOptions {
6849
5589
  * different queries between the base retrieval and ranking phase, and the L2 semantic phase.
6850
5590
  */
6851
5591
  semanticQuery?: string;
6852
- /**
6853
- * The list of field names used for semantic search.
6854
- */
6855
- semanticFields?: string[];
6856
- /**
6857
- * Enables a debugging tool that can be used to further explore your search results.
6858
- */
6859
- debugMode?: QueryDebugMode;
6860
5592
  }
6861
5593
 
6862
- export declare type SemanticSearchResultsType = "baseResults" | "rerankedResults";
5594
+ export declare type SemanticSearchResultsType = `${KnownSemanticSearchResultsType}`;
6863
5595
 
6864
5596
  /**
6865
5597
  * Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1.
@@ -6873,7 +5605,7 @@ export declare interface SentimentSkill extends BaseSearchIndexerSkill {
6873
5605
  defaultLanguageCode?: SentimentSkillLanguage;
6874
5606
  }
6875
5607
 
6876
- export declare type SentimentSkillLanguage = "da" | "nl" | "en" | "fi" | "fr" | "de" | "el" | "it" | "no" | "pl" | "pt-PT" | "ru" | "es" | "sv" | "tr";
5608
+ export declare type SentimentSkillLanguage = `${KnownSentimentSkillLanguage}`;
6877
5609
 
6878
5610
  /** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at a sentence and document-level. */
6879
5611
  export declare interface SentimentSkillV3 extends BaseSearchIndexerSkill {
@@ -6889,8 +5621,6 @@ export declare interface SentimentSkillV3 extends BaseSearchIndexerSkill {
6889
5621
 
6890
5622
  /** Represents service-level resource counters and quotas. */
6891
5623
  export declare interface ServiceCounters {
6892
- /** Total number of aliases. */
6893
- aliasCounter: ResourceCounter;
6894
5624
  /** Total number of documents across all indexes in the service. */
6895
5625
  documentCounter: ResourceCounter;
6896
5626
  /** Total number of indexes. */
@@ -6920,7 +5650,7 @@ export declare interface ServiceLimits {
6920
5650
  /** The maximum number of objects in complex collections allowed per document. */
6921
5651
  maxComplexObjectsInCollectionsPerDocument?: number;
6922
5652
  /** The maximum amount of storage in bytes allowed per index. */
6923
- maxStoragePerIndex?: number;
5653
+ maxStoragePerIndexInBytes?: number;
6924
5654
  }
6925
5655
 
6926
5656
  /** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */
@@ -6969,11 +5699,7 @@ export declare interface SimpleField {
6969
5699
  */
6970
5700
  name: string;
6971
5701
  /**
6972
- * The data type of the field. Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64',
6973
- * 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', 'Edm.GeographyPoint',
6974
- * 'Collection(Edm.String)', 'Collection(Edm.Int32)', 'Collection(Edm.Int64)',
6975
- * 'Collection(Edm.Double)', 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)',
6976
- * 'Collection(Edm.GeographyPoint)', 'Collection(Edm.Single)'
5702
+ * The data type of the field.
6977
5703
  */
6978
5704
  type: SearchFieldDataType;
6979
5705
  /**
@@ -6986,10 +5712,10 @@ export declare interface SimpleField {
6986
5712
  /**
6987
5713
  * A value indicating whether the field can be returned in a search result. You can disable this
6988
5714
  * option if you want to use a field (for example, margin) as a filter, sorting, or scoring
6989
- * mechanism but do not want the field to be visible to the end user. This property must be true
5715
+ * mechanism but do not want the field to be visible to the end user. This property must be false
6990
5716
  * for key fields. This property can be changed on existing fields. Enabling this property does
6991
- * not cause any increase in index storage requirements. Default is true for simple fields and
6992
- * false for vector fields.
5717
+ * not cause any increase in index storage requirements. Default is true for vector fields, false
5718
+ * otherwise.
6993
5719
  */
6994
5720
  hidden?: boolean;
6995
5721
  /**
@@ -6997,94 +5723,92 @@ export declare interface SimpleField {
6997
5723
  * returned in a search result. You can disable this option if you don't plan to return the field
6998
5724
  * contents in a search response to save on storage overhead. This can only be set during index
6999
5725
  * creation and only for vector fields. This property cannot be changed for existing fields or set
7000
- * as false for new fields. If this property is set as false, the property `hidden` must be set as
7001
- * true. This property must be true or unset for key fields, for new fields, and for non-vector
7002
- * fields, and it must be null for complex fields. Disabling this property will reduce index
7003
- * storage requirements. The default is true for vector fields.
5726
+ * as false for new fields. If this property is set as false, the property 'hidden' must be set to
5727
+ * 'true'. This property must be false or unset for key fields, for new fields, and for non-vector
5728
+ * fields. Disabling this property will reduce index storage requirements.
7004
5729
  */
7005
5730
  stored?: boolean;
7006
5731
  /**
7007
5732
  * A value indicating whether the field is full-text searchable. This means it will undergo
7008
5733
  * analysis such as word-breaking during indexing. If you set a searchable field to a value like
7009
5734
  * "sunny day", internally it will be split into the individual tokens "sunny" and "day". This
7010
- * enables full-text searches for these terms. This property must be false for simple
7011
- * fields of other non-string data types.
7012
- * Note: searchable fields consume extra space in your index since Azure Cognitive Search will store an
7013
- * additional tokenized version of the field value for full-text searches.
7014
- * Defaults to false for simple fields.
5735
+ * enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String)
5736
+ * are searchable by default. This property must be false for simple fields of other non-string
5737
+ * data types. Note: searchable fields consume extra space
5738
+ * in your index to accommodate additional tokenized versions of the field value for full-text
5739
+ * searches. If you want to save space in your index and you don't need a field to be included in
5740
+ * searches, set searchable to false. Default is false.
7015
5741
  */
7016
5742
  searchable?: boolean;
7017
5743
  /**
7018
- * A value indicating whether to enable the field to be referenced in $filter queries. Filterable
5744
+ * A value indicating whether to enable the field to be referenced in $filter queries. filterable
7019
5745
  * differs from searchable in how strings are handled. Fields of type Edm.String or
7020
- * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are
7021
- * for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq
7022
- * 'sunny' will find no matches, but $filter=f eq 'sunny day' will.
7023
- * Default is false.
5746
+ * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for
5747
+ * exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny'
5748
+ * will find no matches, but $filter=f eq 'sunny day' will. Default is false.
7024
5749
  */
7025
5750
  filterable?: boolean;
7026
5751
  /**
7027
5752
  * A value indicating whether to enable the field to be referenced in $orderby expressions. By
7028
- * default Azure Cognitive Search sorts results by score, but in many experiences users will want
7029
- * to sort by fields in the documents. A simple field can be sortable only if it is single-valued
7030
- * (it has a single value in the scope of the parent document). Simple collection fields cannot
7031
- * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also
5753
+ * default, the search engine sorts results by score, but in many experiences users will want to
5754
+ * sort by fields in the documents. A simple field can be sortable only if it is single-valued (it
5755
+ * has a single value in the scope of the parent document). Simple collection fields cannot be
5756
+ * sortable, since they are multi-valued. Simple sub-fields of complex collections are also
7032
5757
  * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent
7033
- * field, or an ancestor field, that's the complex collection. The default for sortable is false.
5758
+ * field, or an ancestor field, that's the complex collection. The default is false.
5759
+ *
7034
5760
  */
7035
5761
  sortable?: boolean;
7036
5762
  /**
7037
5763
  * A value indicating whether to enable the field to be referenced in facet queries. Typically
7038
5764
  * used in a presentation of search results that includes hit count by category (for example,
7039
- * search for digital cameras and see hits by brand, by megapixels, by price, and so on).
7040
- * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable.
7041
- * Default is false for all other simple fields.
5765
+ * search for digital cameras and see hits by brand, by megapixels, by price, and so on). Fields
5766
+ * of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is
5767
+ * false.
7042
5768
  */
7043
5769
  facetable?: boolean;
7044
5770
  /**
7045
- * The name of the language analyzer to use for the field. This option can be used only with
7046
- * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.
7047
- * Once the analyzer is chosen, it cannot be changed for the field.
7048
- * KnownAnalyzerNames is an enum containing known values.
5771
+ * The name of the analyzer to use for the field. This option can be used only with searchable
5772
+ * fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the
5773
+ * analyzer is chosen, it cannot be changed for the field.
7049
5774
  */
7050
5775
  analyzerName?: LexicalAnalyzerName;
7051
5776
  /**
7052
5777
  * The name of the analyzer used at search time for the field. This option can be used only with
7053
- * searchable fields. It must be set together with indexAnalyzer and it cannot be set together
7054
- * with the analyzer option. This analyzer can be updated on an existing field.
7055
- * KnownAnalyzerNames is an enum containing known values.
5778
+ * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set
5779
+ * together with the `analyzerName` option. This property cannot be set to the name of a language
5780
+ * analyzer; use the `analyzerName` property instead if you need a language analyzer. This
5781
+ * analyzer can be updated on an existing field.
7056
5782
  */
7057
5783
  searchAnalyzerName?: LexicalAnalyzerName;
7058
5784
  /**
7059
5785
  * The name of the analyzer used at indexing time for the field. This option can be used only
7060
5786
  * with searchable fields. It must be set together with searchAnalyzer and it cannot be set
7061
- * together with the analyzer option. Once the analyzer is chosen, it cannot be changed for the
7062
- * field. KnownAnalyzerNames is an enum containing known values.
5787
+ * together with the analyzer option. This property cannot be set to the name of a language
5788
+ * analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer
5789
+ * is chosen, it cannot be changed for the field.
7063
5790
  */
7064
5791
  indexAnalyzerName?: LexicalAnalyzerName;
7065
5792
  /**
7066
5793
  * A list of the names of synonym maps to associate with this field. This option can be used only
7067
5794
  * with searchable fields. Currently only one synonym map per field is supported. Assigning a
7068
- * synonym map to a field ensures that query terms targeting that field are expanded at
7069
- * query-time using the rules in the synonym map. This attribute can be changed on existing
7070
- * fields.
5795
+ * synonym map to a field ensures that query terms targeting that field are expanded at query-time
5796
+ * using the rules in the synonym map. This attribute can be changed on existing fields.
7071
5797
  */
7072
5798
  synonymMapNames?: string[];
7073
- /**
7074
- * The name of the normalizer used at indexing time for the field.
7075
- */
7076
- normalizerName?: LexicalNormalizerName;
7077
5799
  /**
7078
5800
  * The dimensionality of the vector field.
7079
5801
  */
7080
5802
  vectorSearchDimensions?: number;
7081
5803
  /**
7082
- * The name of the vector search algorithm configuration that specifies the algorithm and
7083
- * optional parameters for searching the vector field.
5804
+ * The name of the vector search profile that specifies the algorithm and vectorizer to use when
5805
+ * searching the vector field.
7084
5806
  */
7085
5807
  vectorSearchProfileName?: string;
7086
- /** The encoding format to interpret the field contents. */
7087
- vectorSearchEncodingFormat?: VectorEncodingFormat;
5808
+ /**
5809
+ * The encoding format to interpret the field contents.
5810
+ */
5811
+ vectorEncodingFormat?: VectorEncodingFormat;
7088
5812
  }
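
The attribute documentation above covers how searchable, filterable, sortable, facetable, the analyzer names, and the vector settings interact, and vectorSearchEncodingFormat is now vectorEncodingFormat. A small sketch of field definitions exercising those options; the `key` member is assumed from the rest of the SimpleField declaration, which is not fully shown in this diff:

    import type { SimpleField } from "@azure/search-documents";

    const fields: SimpleField[] = [
      { name: "hotelId", type: "Edm.String", key: true, filterable: true },
      {
        name: "description",
        type: "Edm.String",
        searchable: true,          // tokenized for full-text search
        analyzerName: "en.lucene", // a LexicalAnalyzerName; fixed once chosen
      },
      { name: "rating", type: "Edm.Double", filterable: true, sortable: true, facetable: true },
      {
        name: "descriptionVector",
        type: "Collection(Edm.Single)",
        searchable: true,
        vectorSearchDimensions: 1536,
        vectorSearchProfileName: "my-vector-profile", // ties the field to a VectorSearch profile
      },
    ];
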
7089
5813
 
7090
5814
  /** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */
@@ -7108,16 +5832,6 @@ export declare interface SoftDeleteColumnDeletionDetectionPolicy extends BaseDat
7108
5832
  softDeleteMarkerValue?: string;
7109
5833
  }
7110
5834
 
7111
- /**
7112
- * Defines values for Speller. \
7113
- * {@link KnownSpeller} can be used interchangeably with Speller,
7114
- * this enum contains the known values that the service supports.
7115
- * ### Known values supported by the service
7116
- * **none**: Speller not enabled. \
7117
- * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
7118
- */
7119
- export declare type Speller = string;
7120
-
7121
5835
  /** A skill to split a string into chunks of text. */
7122
5836
  export declare interface SplitSkill extends BaseSearchIndexerSkill {
7123
5837
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -7130,7 +5844,7 @@ export declare interface SplitSkill extends BaseSearchIndexerSkill {
7130
5844
  maxPageLength?: number;
7131
5845
  }
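
A sketch of a SplitSkill definition using the TextSplitMode union defined further down; the odatatype discriminator, the textSplitMode and defaultLanguageCode members, and the inputs/outputs entries inherited from the base skill type are assumed here rather than shown in this hunk:

    import type { SplitSkill } from "@azure/search-documents";

    const splitSkill: SplitSkill = {
      odatatype: "#Microsoft.Skills.Text.SplitSkill", // assumed discriminator value
      textSplitMode: "pages",                         // TextSplitMode: "pages" | "sentences"
      maxPageLength: 2000,
      defaultLanguageCode: "en",
      inputs: [{ name: "text", source: "/document/content" }],
      outputs: [{ name: "textItems", targetName: "pages" }],
    };
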
7132
5846
 
7133
- export declare type SplitSkillLanguage = "am" | "bs" | "cs" | "da" | "de" | "en" | "es" | "et" | "fi" | "fr" | "he" | "hi" | "hr" | "hu" | "id" | "is" | "it" | "ja" | "ko" | "lv" | "nb" | "nl" | "pl" | "pt" | "pt-br" | "ru" | "sk" | "sl" | "sr" | "sv" | "tr" | "ur" | "zh";
5847
+ export declare type SplitSkillLanguage = `${KnownSplitSkillLanguage}`;
7134
5848
 
7135
5849
  /** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
7136
5850
  export declare interface SqlIntegratedChangeTrackingPolicy extends BaseDataChangeDetectionPolicy {
@@ -7333,7 +6047,7 @@ export declare interface TagScoringParameters {
7333
6047
  tagsParameter: string;
7334
6048
  }
7335
6049
 
7336
- export declare type TextSplitMode = "pages" | "sentences";
6050
+ export declare type TextSplitMode = `${KnownTextSplitMode}`;
7337
6051
 
7338
6052
  /** A skill to translate text from one language to another. */
7339
6053
  export declare interface TextTranslationSkill extends BaseSearchIndexerSkill {
@@ -7347,7 +6061,7 @@ export declare interface TextTranslationSkill extends BaseSearchIndexerSkill {
7347
6061
  suggestedFrom?: TextTranslationSkillLanguage;
7348
6062
  }
7349
6063
 
7350
- export declare type TextTranslationSkillLanguage = "af" | "ar" | "bn" | "bs" | "bg" | "yue" | "ca" | "zh-Hans" | "zh-Hant" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fj" | "fil" | "fi" | "fr" | "de" | "el" | "ht" | "he" | "hi" | "mww" | "hu" | "is" | "id" | "it" | "ja" | "sw" | "tlh" | "tlh-Latn" | "tlh-Piqd" | "ko" | "lv" | "lt" | "mg" | "ms" | "mt" | "nb" | "fa" | "pl" | "pt" | "pt-br" | "pt-PT" | "otq" | "ro" | "ru" | "sm" | "sr-Cyrl" | "sr-Latn" | "sk" | "sl" | "es" | "sv" | "ty" | "ta" | "te" | "th" | "to" | "tr" | "uk" | "ur" | "vi" | "cy" | "yua" | "ga" | "kn" | "mi" | "ml" | "pa";
6064
+ export declare type TextTranslationSkillLanguage = `${KnownTextTranslationSkillLanguage}`;
7351
6065
 
7352
6066
  /** Defines weights on index fields for which matches should boost scoring in search queries. */
7353
6067
  export declare interface TextWeights {
@@ -7357,18 +6071,6 @@ export declare interface TextWeights {
7357
6071
  };
7358
6072
  }
7359
6073
 
7360
- /**
7361
- * Specifies the properties for connecting to an AML vectorizer with a managed identity.
7362
- */
7363
- export declare interface TokenAuthAzureMachineLearningVectorizerParameters extends BaseAzureMachineLearningVectorizerParameters {
7364
- /** Indicates how the service should attempt to identify itself to the AML instance */
7365
- authKind: "token";
7366
- /** The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/\{guid\}/resourceGroups/\{resource-group-name\}/Microsoft.MachineLearningServices/workspaces/\{workspace-name\}/services/\{service_name\}. */
7367
- resourceId: string;
7368
- /** The region the AML service is deployed in. */
7369
- region?: string;
7370
- }
7371
-
7372
6074
  /** Defines values for TokenCharacterKind. */
7373
6075
  export declare type TokenCharacterKind = "letter" | "digit" | "whitespace" | "punctuation" | "symbol";
7374
6076
 
@@ -7409,7 +6111,7 @@ export declare type TokenFilter = AsciiFoldingTokenFilter | CjkBigramTokenFilter
7409
6111
  * **shingle**: Creates combinations of tokens as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html \
7410
6112
  * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html \
7411
6113
  * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html \
7412
- * **stemmer**: Language specific stemming filter. See https:\/\/docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \
6114
+ * **stemmer**: Language specific stemming filter. See https:\/\/learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters \
7413
6115
  * **stopwords**: Removes stop words from a token stream. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html \
7414
6116
  * **trim**: Trims leading and trailing whitespace from tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \
7415
6117
  * **truncate**: Truncates the terms to a specific length. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \
@@ -7459,23 +6161,7 @@ export declare type UploadDocumentsOptions = IndexDocumentsOptions;
7459
6161
  */
7460
6162
  export declare type VectorEncodingFormat = string;
7461
6163
 
7462
- export declare type VectorFilterMode = "postFilter" | "preFilter";
7463
-
7464
- /** The query parameters to use for vector search when a base 64 encoded binary of an image that needs to be vectorized is provided. */
7465
- export declare interface VectorizableImageBinaryQuery<TModel extends object> extends BaseVectorQuery<TModel> {
7466
- /** Polymorphic discriminator, which specifies the different types this object can be */
7467
- kind: "imageBinary";
7468
- /** The base64 encoded binary of an image to be vectorized to perform a vector search query. */
7469
- binaryImage: string;
7470
- }
7471
-
7472
- /** The query parameters to use for vector search when an url that represents an image value that needs to be vectorized is provided. */
7473
- export declare interface VectorizableImageUrlQuery<TModel extends object> extends BaseVectorQuery<TModel> {
7474
- /** Polymorphic discriminator, which specifies the different types this object can be */
7475
- kind: "imageUrl";
7476
- /** The URL of an image to be vectorized to perform a vector search query. */
7477
- url: string;
7478
- }
6164
+ export declare type VectorFilterMode = `${KnownVectorFilterMode}`;
7479
6165
 
7480
6166
  /** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
7481
6167
  export declare interface VectorizableTextQuery<TModel extends object> extends BaseVectorQuery<TModel> {
@@ -7494,55 +6180,50 @@ export declare interface VectorizedQuery<TModel extends object> extends BaseVect
7494
6180
  }
7495
6181
 
7496
6182
  /** The query parameters for vector and hybrid search queries. */
7497
- export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel> | VectorizableImageUrlQuery<TModel> | VectorizableImageBinaryQuery<TModel>;
6183
+ export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel>;
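
VectorQuery is reduced to the vectorized and vectorizable-text variants. A sketch of both query kinds; the vector, text, fields, and kNearestNeighborsCount members come from the base and derived query types elsewhere in the declaration file and are assumed here:

    import type { VectorQuery } from "@azure/search-documents";

    interface Hotel {
      description: string;
      descriptionVector: number[];
    }

    // Pass a precomputed embedding directly.
    const byVector: VectorQuery<Hotel> = {
      kind: "vector",
      vector: [0.12, 0.34, 0.56],
      fields: ["descriptionVector"],
      kNearestNeighborsCount: 3,
    };

    // Or send raw text and let the index's configured vectorizer embed it.
    const byText: VectorQuery<Hotel> = {
      kind: "text",
      text: "walking distance to the beach",
      fields: ["descriptionVector"],
      kNearestNeighborsCount: 3,
    };
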
7498
6184
 
7499
- export declare type VectorQueryKind = "vector" | "text" | "imageUrl" | "imageBinary";
6185
+ export declare type VectorQueryKind = `${KnownVectorQueryKind}`;
7500
6186
 
7501
6187
  /** Contains configuration options related to vector search. */
7502
6188
  export declare interface VectorSearch {
7503
6189
  /** Defines combinations of configurations to use with vector search. */
7504
6190
  profiles?: VectorSearchProfile[];
7505
- /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
6191
+ /** Contains configuration options specific to the algorithm used during indexing or querying. */
7506
6192
  algorithms?: VectorSearchAlgorithmConfiguration[];
7507
6193
  /** Contains configuration options on how to vectorize text vector queries. */
7508
6194
  vectorizers?: VectorSearchVectorizer[];
7509
- /**
7510
- * Contains configuration options specific to the compression method used during indexing or
7511
- * querying.
7512
- */
7513
- compressions?: VectorSearchCompressionConfiguration[];
6195
+ /** Contains configuration options specific to the compression method used during indexing or querying. */
6196
+ compressions?: VectorSearchCompression[];
7514
6197
  }
7515
6198
 
7516
6199
  /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
7517
6200
  export declare type VectorSearchAlgorithmConfiguration = HnswAlgorithmConfiguration | ExhaustiveKnnAlgorithmConfiguration;
7518
6201
 
7519
- export declare type VectorSearchAlgorithmKind = "hnsw" | "exhaustiveKnn";
6202
+ export declare type VectorSearchAlgorithmKind = `${KnownVectorSearchAlgorithmKind}`;
7520
6203
 
7521
- export declare type VectorSearchAlgorithmMetric = "cosine" | "euclidean" | "dotProduct" | "hamming";
6204
+ export declare type VectorSearchAlgorithmMetric = `${KnownVectorSearchAlgorithmMetric}`;
7522
6205
 
7523
- /**
7524
- * Contains configuration options specific to the compression method used during indexing or
7525
- * querying.
7526
- */
7527
- export declare type VectorSearchCompressionConfiguration = ScalarQuantizationCompressionConfiguration;
6206
+ /** Contains configuration options specific to the compression method used during indexing or querying. */
6207
+ export declare type VectorSearchCompression = BinaryQuantizationCompression | ScalarQuantizationCompression;
7528
6208
 
7529
6209
  /**
7530
6210
  * Defines values for VectorSearchCompressionKind. \
7531
6211
  * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind,
7532
6212
  * this enum contains the known values that the service supports.
7533
6213
  * ### Known values supported by the service
7534
- * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size.
6214
+ * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. \
6215
+ * **binaryQuantization**: Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size.
7535
6216
  */
7536
6217
  export declare type VectorSearchCompressionKind = string;
7537
6218
 
7538
6219
  /**
7539
- * Defines values for VectorSearchCompressionTargetDataType. \
7540
- * {@link KnownVectorSearchCompressionTargetDataType} can be used interchangeably with VectorSearchCompressionTargetDataType,
6220
+ * Defines values for VectorSearchCompressionTarget. \
6221
+ * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget,
7541
6222
  * this enum contains the known values that the service supports.
7542
6223
  * ### Known values supported by the service
7543
6224
  * **int8**
7544
6225
  */
7545
- export declare type VectorSearchCompressionTargetDataType = string;
6226
+ export declare type VectorSearchCompressionTarget = string;
7546
6227
 
7547
6228
  /**
7548
6229
  * Defines options for vector search queries
@@ -7565,38 +6246,45 @@ export declare interface VectorSearchProfile {
7565
6246
  name: string;
7566
6247
  /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */
7567
6248
  algorithmConfigurationName: string;
7568
- /** The name of the kind of vectorization method being configured for use with vector search. */
7569
- vectorizer?: string;
6249
+ /** The name of the vectorization being configured for use with vector search. */
6250
+ vectorizerName?: string;
7570
6251
  /** The name of the compression method configuration that specifies the compression method and optional parameters. */
7571
- compressionConfigurationName?: string;
6252
+ compressionName?: string;
7572
6253
  }
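
A profile now references its vectorizer and compression by vectorizerName and compressionName. A sketch of a VectorSearch section wiring an algorithm and a profile together; the name and kind members of the HNSW algorithm entry are assumed from the algorithm configuration types, which are not shown in this hunk:

    import type { VectorSearch } from "@azure/search-documents";

    const vectorSearch: VectorSearch = {
      algorithms: [{ name: "hnsw-1", kind: "hnsw" }], // members assumed from HnswAlgorithmConfiguration
      profiles: [
        {
          name: "my-vector-profile",
          algorithmConfigurationName: "hnsw-1",
          vectorizerName: "my-openai-vectorizer", // must match an entry under `vectorizers`
          compressionName: "scalar-1",            // must match an entry under `compressions`
        },
      ],
    };
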
7573
6254
 
7574
6255
  /** Contains configuration options on how to vectorize text vector queries. */
7575
- export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | CustomVectorizer | AIServicesVisionVectorizer | AzureMachineLearningVectorizer;
7576
-
7577
- export declare type VectorSearchVectorizerKind = "azureOpenAI" | "customWebApi" | "aiServicesVision" | "aml";
6256
+ export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVectorizer;
7578
6257
 
7579
- /** The results of the vector query will be filtered based on the vector similarity metric. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
7580
- export declare interface VectorSimilarityThreshold extends BaseVectorThreshold {
7581
- /** Polymorphic discriminator, which specifies the different types this object can be */
7582
- kind: "vectorSimilarity";
7583
- /** The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. */
7584
- value: number;
7585
- }
6258
+ /**
6259
+ * Defines values for VectorSearchVectorizerKind. \
6260
+ * {@link KnownVectorSearchVectorizerKind} can be used interchangeably with VectorSearchVectorizerKind,
6261
+ * this enum contains the known values that the service supports.
6262
+ * ### Known values supported by the service
6263
+ * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \
6264
+ * **customWebApi**: Generate embeddings using a custom web endpoint at query time.
6265
+ */
6266
+ export declare type VectorSearchVectorizerKind = string;
7586
6267
 
7587
- /** The threshold used for vector queries. */
7588
- export declare type VectorThreshold = VectorSimilarityThreshold | SearchScoreThreshold;
6268
+ export declare type VisualFeature = `${KnownVisualFeature}`;
7589
6269
 
7590
- /** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */
7591
- export declare interface VisionVectorizeSkill extends BaseSearchIndexerSkill {
7592
- /** Polymorphic discriminator, which specifies the different types this object can be */
7593
- odatatype: "#Microsoft.Skills.Vision.VectorizeSkill";
7594
- /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */
7595
- modelVersion?: string;
6270
+ /** Specifies the properties for connecting to a user-defined vectorizer. */
6271
+ export declare interface WebApiParameters {
6272
+ /** The URI of the Web API providing the vectorizer. */
6273
+ uri?: string;
6274
+ /** The headers required to make the HTTP request. */
6275
+ httpHeaders?: {
6276
+ [propertyName: string]: string;
6277
+ };
6278
+ /** The method for the HTTP request. */
6279
+ httpMethod?: string;
6280
+ /** The desired timeout for the request. Default is 30 seconds. */
6281
+ timeout?: string;
6282
+ /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
6283
+ authResourceId?: string;
6284
+ /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
6285
+ authIdentity?: SearchIndexerDataIdentity;
7596
6286
  }
7597
6287
 
7598
- export declare type VisualFeature = "adult" | "brands" | "categories" | "description" | "faces" | "objects" | "tags";
7599
-
7600
6288
  /**
7601
6289
  * A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call
7602
6290
  * your custom code.
@@ -7650,6 +6338,14 @@ export declare interface WebApiSkill extends BaseSearchIndexerSkill {
7650
6338
  authIdentity?: SearchIndexerDataIdentity;
7651
6339
  }
7652
6340
 
6341
+ /** Specifies a user-defined vectorizer for generating the vector embedding of a query string. Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */
6342
+ export declare interface WebApiVectorizer extends BaseVectorSearchVectorizer {
6343
+ /** Polymorphic discriminator, which specifies the different types this object can be */
6344
+ kind: "customWebApi";
6345
+ /** Specifies the properties of the user-defined vectorizer. */
6346
+ parameters?: WebApiParameters;
6347
+ }
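
WebApiVectorizer (kind "customWebApi") together with WebApiParameters replaces the earlier custom vectorizer shape. A sketch limited to the members shown in this diff; the base vectorizer's naming property is omitted, so Pick keeps it compilable, and the timeout format is assumed to be an ISO 8601 duration:

    import type { WebApiVectorizer } from "@azure/search-documents";

    const vectorizer: Pick<WebApiVectorizer, "kind" | "parameters"> = {
      kind: "customWebApi",
      parameters: {
        uri: "https://example-embeddings.azurewebsites.net/api/embed",
        httpMethod: "POST",
        httpHeaders: { "x-functions-key": "<function-key>" },
        timeout: "PT30S", // assumed ISO 8601 duration; the documented default is 30 seconds
      },
    };
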
6348
+
7653
6349
  /** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */
7654
6350
  export declare interface WordDelimiterTokenFilter extends BaseTokenFilter {
7655
6351
  /** Polymorphic discriminator, which specifies the different types this object can be */