@azure/search-documents 12.0.0 → 12.1.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77) hide show
  1. package/README.md +52 -32
  2. package/dist/index.js +13767 -11641
  3. package/dist/index.js.map +1 -1
  4. package/dist-esm/src/constants.js +2 -1
  5. package/dist-esm/src/constants.js.map +1 -1
  6. package/dist-esm/src/errorModels.js +4 -0
  7. package/dist-esm/src/errorModels.js.map +1 -0
  8. package/dist-esm/src/generated/data/models/index.js +191 -7
  9. package/dist-esm/src/generated/data/models/index.js.map +1 -1
  10. package/dist-esm/src/generated/data/models/mappers.js +534 -294
  11. package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
  12. package/dist-esm/src/generated/data/models/parameters.js +254 -203
  13. package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
  14. package/dist-esm/src/generated/data/operations/documents.js +50 -45
  15. package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
  16. package/dist-esm/src/generated/data/operationsInterfaces/documents.js.map +1 -1
  17. package/dist-esm/src/generated/data/searchClient.js +4 -4
  18. package/dist-esm/src/generated/data/searchClient.js.map +1 -1
  19. package/dist-esm/src/generated/service/models/index.js +176 -84
  20. package/dist-esm/src/generated/service/models/index.js.map +1 -1
  21. package/dist-esm/src/generated/service/models/mappers.js +2370 -1437
  22. package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
  23. package/dist-esm/src/generated/service/models/parameters.js +99 -49
  24. package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
  25. package/dist-esm/src/generated/service/operations/aliases.js +160 -0
  26. package/dist-esm/src/generated/service/operations/aliases.js.map +1 -0
  27. package/dist-esm/src/generated/service/operations/dataSources.js +26 -23
  28. package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
  29. package/dist-esm/src/generated/service/operations/index.js +1 -0
  30. package/dist-esm/src/generated/service/operations/index.js.map +1 -1
  31. package/dist-esm/src/generated/service/operations/indexers.js +61 -33
  32. package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
  33. package/dist-esm/src/generated/service/operations/indexes.js +30 -30
  34. package/dist-esm/src/generated/service/operations/indexes.js.map +1 -1
  35. package/dist-esm/src/generated/service/operations/skillsets.js +52 -23
  36. package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
  37. package/dist-esm/src/generated/service/operations/synonymMaps.js +22 -22
  38. package/dist-esm/src/generated/service/operations/synonymMaps.js.map +1 -1
  39. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js +9 -0
  40. package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +1 -0
  41. package/dist-esm/src/generated/service/operationsInterfaces/dataSources.js.map +1 -1
  42. package/dist-esm/src/generated/service/operationsInterfaces/index.js +1 -0
  43. package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +1 -1
  44. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
  45. package/dist-esm/src/generated/service/operationsInterfaces/indexes.js.map +1 -1
  46. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  47. package/dist-esm/src/generated/service/operationsInterfaces/synonymMaps.js.map +1 -1
  48. package/dist-esm/src/generated/service/searchServiceClient.js +10 -9
  49. package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
  50. package/dist-esm/src/generatedStringLiteralUnions.js +4 -0
  51. package/dist-esm/src/generatedStringLiteralUnions.js.map +1 -0
  52. package/dist-esm/src/index.js +9 -9
  53. package/dist-esm/src/index.js.map +1 -1
  54. package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
  55. package/dist-esm/src/indexModels.js.map +1 -1
  56. package/dist-esm/src/odata.js.map +1 -1
  57. package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
  58. package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
  59. package/dist-esm/src/searchClient.js +23 -17
  60. package/dist-esm/src/searchClient.js.map +1 -1
  61. package/dist-esm/src/searchIndexClient.js +155 -6
  62. package/dist-esm/src/searchIndexClient.js.map +1 -1
  63. package/dist-esm/src/searchIndexerClient.js +53 -4
  64. package/dist-esm/src/searchIndexerClient.js.map +1 -1
  65. package/dist-esm/src/searchIndexingBufferedSender.js +3 -8
  66. package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
  67. package/dist-esm/src/serialization.js.map +1 -1
  68. package/dist-esm/src/serviceModels.js.map +1 -1
  69. package/dist-esm/src/serviceUtils.js +71 -26
  70. package/dist-esm/src/serviceUtils.js.map +1 -1
  71. package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
  72. package/dist-esm/src/synonymMapHelper.js +1 -1
  73. package/dist-esm/src/synonymMapHelper.js.map +1 -1
  74. package/dist-esm/src/tracing.js +1 -1
  75. package/dist-esm/src/tracing.js.map +1 -1
  76. package/package.json +39 -40
  77. package/types/search-documents.d.ts +1655 -152
@@ -5,9 +5,17 @@ import { ExtendedCommonClientOptions } from '@azure/core-http-compat';
5
5
  import { KeyCredential } from '@azure/core-auth';
6
6
  import { OperationOptions } from '@azure/core-client';
7
7
  import { PagedAsyncIterableIterator } from '@azure/core-paging';
8
+ import { Pipeline } from '@azure/core-rest-pipeline';
8
9
  import { RestError } from '@azure/core-rest-pipeline';
9
10
  import { TokenCredential } from '@azure/core-auth';
10
11
 
12
+ /**
13
+ * An iterator for listing the aliases that exist in the Search service. Will make requests
14
+ * as needed during iteration. Use .byPage() to make one request to the server
15
+ * per iteration.
16
+ */
17
+ export declare type AliasIterator = PagedAsyncIterableIterator<SearchIndexAlias, SearchIndexAlias[], {}>;
18
+
11
19
  /** Information about a token returned by an analyzer. */
12
20
  export declare interface AnalyzedTokenInfo {
13
21
  /**
@@ -54,6 +62,11 @@ export declare interface AnalyzeRequest {
54
62
  * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.
55
63
  */
56
64
  tokenizerName?: LexicalTokenizerName;
65
+ /**
66
+ * The name of the normalizer to use to normalize the given text. {@link KnownNormalizerNames} is
67
+ * an enum containing built-in normalizer names.
68
+ */
69
+ normalizerName?: LexicalNormalizerName;
57
70
  /**
58
71
  * An optional list of token filters to use when breaking the given text. This parameter can only
59
72
  * be set when using the tokenizer parameter.
@@ -183,6 +196,58 @@ export declare interface AzureActiveDirectoryApplicationCredentials {
183
196
 
184
197
  export { AzureKeyCredential }
185
198
 
199
+ /** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */
200
+ export declare interface AzureMachineLearningSkill extends BaseSearchIndexerSkill {
201
+ /** Polymorphic discriminator, which specifies the different types this object can be */
202
+ odatatype: "#Microsoft.Skills.Custom.AmlSkill";
203
+ /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
204
+ scoringUri?: string;
205
+ /** (Required for key authentication) The key for the AML service. */
206
+ authenticationKey?: string;
207
+ /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */
208
+ resourceId?: string;
209
+ /** (Optional) When specified, indicates the timeout for the http client making the API call. */
210
+ timeout?: string;
211
+ /** (Optional for token authentication). The region the AML service is deployed in. */
212
+ region?: string;
213
+ /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */
214
+ degreeOfParallelism?: number;
215
+ }
216
+
217
+ /** Allows you to generate a vector embedding for a given text input using the Azure Open AI service. */
218
+ export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill {
219
+ /** Polymorphic discriminator, which specifies the different types this object can be */
220
+ odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
221
+ /** The resource uri for your Azure Open AI resource. */
222
+ resourceUri?: string;
223
+ /** ID of your Azure Open AI model deployment on the designated resource. */
224
+ deploymentId?: string;
225
+ /** API key for the designated Azure Open AI resource. */
226
+ apiKey?: string;
227
+ /** The user-assigned managed identity used for outbound connections. */
228
+ authIdentity?: SearchIndexerDataIdentity;
229
+ }
230
+
231
+ /** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */
232
+ export declare interface AzureOpenAIParameters {
233
+ /** The resource uri for your Azure Open AI resource. */
234
+ resourceUri?: string;
235
+ /** ID of your Azure Open AI model deployment on the designated resource. */
236
+ deploymentId?: string;
237
+ /** API key for the designated Azure Open AI resource. */
238
+ apiKey?: string;
239
+ /** The user-assigned managed identity used for outbound connections. */
240
+ authIdentity?: SearchIndexerDataIdentity;
241
+ }
242
+
243
+ /** Contains the parameters specific to using an Azure Open AI service for vectorization at query time. */
244
+ export declare type AzureOpenAIVectorizer = BaseVectorSearchVectorizer & {
245
+ /** Polymorphic discriminator, which specifies the different types this object can be */
246
+ kind: "azureOpenAI";
247
+ /** Contains the parameters specific to Azure Open AI embedding vectorization. */
248
+ azureOpenAIParameters?: AzureOpenAIParameters;
249
+ };
250
+
186
251
  /** Base type for character filters. */
187
252
  export declare interface BaseCharFilter {
188
253
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -208,7 +273,7 @@ export declare interface BaseDataChangeDetectionPolicy {
208
273
  /** Base type for data deletion detection policies. */
209
274
  export declare interface BaseDataDeletionDetectionPolicy {
210
275
  /** Polymorphic discriminator, which specifies the different types this object can be */
211
- odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
276
+ odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" | "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
212
277
  }
213
278
 
214
279
  /** Base type for analyzers. */
@@ -219,6 +284,14 @@ export declare interface BaseLexicalAnalyzer {
219
284
  name: string;
220
285
  }
221
286
 
287
+ /** Base type for normalizers. */
288
+ export declare interface BaseLexicalNormalizer {
289
+ /** Polymorphic discriminator, which specifies the different types this object can be */
290
+ odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
291
+ /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */
292
+ name: string;
293
+ }
294
+
222
295
  /** Base type for tokenizers. */
223
296
  export declare interface BaseLexicalTokenizer {
224
297
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -239,11 +312,17 @@ export declare interface BaseScoringFunction {
239
312
  interpolation?: ScoringFunctionInterpolation;
240
313
  }
241
314
 
315
+ /** Abstract base type for data identities. */
316
+ export declare interface BaseSearchIndexerDataIdentity {
317
+ /** Polymorphic discriminator, which specifies the different types this object can be */
318
+ odatatype: "#Microsoft.Azure.Search.DataNoneIdentity" | "#Microsoft.Azure.Search.DataUserAssignedIdentity";
319
+ }
320
+
242
321
  /** Base type for skills. */
243
322
  export declare interface BaseSearchIndexerSkill {
244
323
  /** Polymorphic discriminator, which specifies the different types this object can be */
245
- odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill";
246
- /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character `#`. */
324
+ odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Custom.AmlSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
325
+ /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */
247
326
  name?: string;
248
327
  /** The description of the skill which describes the inputs, outputs, and usage of the skill. */
249
328
  description?: string;
@@ -327,6 +406,14 @@ export declare interface BaseSearchRequestOptions<TModel extends object, TFields
327
406
  * fielded search expression take precedence over any field names listed in this parameter.
328
407
  */
329
408
  searchFields?: SearchFieldArray<TModel>;
409
+ /**
410
+ * The language of the query.
411
+ */
412
+ queryLanguage?: QueryLanguage;
413
+ /**
414
+ * Improve search recall by spell-correcting individual search query terms.
415
+ */
416
+ speller?: Speller;
330
417
  /**
331
418
  * A value that specifies whether any or all of the search terms must be matched in order to
332
419
  * count the document as a match. Possible values include: 'any', 'all'
@@ -386,8 +473,19 @@ export declare interface BaseVectorQuery<TModel extends object> {
386
473
  kNearestNeighborsCount?: number;
387
474
  /** Vector Fields of type Collection(Edm.Single) to be included in the vector searched. */
388
475
  fields?: SearchFieldArray<TModel>;
389
- /** When true, triggers an exhaustive k-nearest neighbor search across all vectors within the vector index. Useful for scenarios where exact matches are critical, such as determining ground truth values. */
476
+ /**
477
+ * When true, triggers an exhaustive k-nearest neighbor search across all vectors within the
478
+ * vector index. Useful for scenarios where exact matches are critical, such as determining ground
479
+ * truth values.
480
+ */
390
481
  exhaustive?: boolean;
482
+ /**
483
+ * Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter
484
+ * configured in the index definition. It can be set only when 'rerankWithOriginalVectors' is
485
+ * true. This parameter is only permitted when a compression method is used on the underlying
486
+ * vector field.
487
+ */
488
+ oversampling?: number;
391
489
  }
392
490
 
393
491
  /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
@@ -398,38 +496,32 @@ export declare interface BaseVectorSearchAlgorithmConfiguration {
398
496
  name: string;
399
497
  }
400
498
 
401
- /**
402
- * ### Known values supported by the service
403
- * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata.
404
- * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed).
405
- * **contentAndMetadata**: Extracts all metadata and textual content from each blob.
406
- */
499
+ /** Contains configuration options specific to the compression method used during indexing or querying. */
500
+ export declare interface BaseVectorSearchCompressionConfiguration {
501
+ /** Polymorphic discriminator, which specifies the different types this object can be */
502
+ kind: "scalarQuantization";
503
+ /** The name to associate with this particular configuration. */
504
+ name: string;
505
+ /** If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */
506
+ rerankWithOriginalVectors?: boolean;
507
+ /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */
508
+ defaultOversampling?: number;
509
+ }
510
+
511
+ /** Contains specific details for a vectorization method to be used during query time. */
512
+ export declare interface BaseVectorSearchVectorizer {
513
+ /** Polymorphic discriminator, which specifies the different types this object can be */
514
+ kind: VectorSearchVectorizerKind;
515
+ /** The name to associate with this particular vectorization method. */
516
+ name: string;
517
+ }
518
+
407
519
  export declare type BlobIndexerDataToExtract = "storageMetadata" | "allMetadata" | "contentAndMetadata";
408
520
 
409
- /**
410
- * ### Known values supported by the service
411
- * **none**: Ignores embedded images or image files in the data set. This is the default.
412
- * **generateNormalizedImages**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option.
413
- * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set.
414
- */
415
521
  export declare type BlobIndexerImageAction = "none" | "generateNormalizedImages" | "generateNormalizedImagePerPage";
416
522
 
417
- /**
418
- * ### Known values supported by the service
419
- * **default**: Set to default for normal file processing.
420
- * **text**: Set to text to improve indexing performance on plain text files in blob storage.
421
- * **delimitedText**: Set to delimitedText when blobs are plain CSV files.
422
- * **json**: Set to json to extract structured content from JSON files.
423
- * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents in Azure Cognitive Search.
424
- * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents in Azure Cognitive Search.
425
- */
426
523
  export declare type BlobIndexerParsingMode = "default" | "text" | "delimitedText" | "json" | "jsonArray" | "jsonLines";
427
524
 
428
- /**
429
- * ### Known values supported by the service
430
- * **none**: Leverages normal text extraction. This is the default.
431
- * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply.
432
- */
433
525
  export declare type BlobIndexerPDFTextRotationAlgorithm = "none" | "detectAngles";
434
526
 
435
527
  /** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */
@@ -488,7 +580,7 @@ export declare interface ClassicTokenizer extends BaseLexicalTokenizer {
488
580
  */
489
581
  export declare type CognitiveServicesAccount = DefaultCognitiveServicesAccount | CognitiveServicesAccountKey;
490
582
 
491
- /** An Azure AI service resource provisioned with a key that is attached to a skillset. */
583
+ /** The multi-region account key of an Azure AI service resource that's attached to a skillset. */
492
584
  export declare interface CognitiveServicesAccountKey extends BaseCognitiveServicesAccount {
493
585
  /** Polymorphic discriminator, which specifies the different types this object can be */
494
586
  odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey";
@@ -544,7 +636,7 @@ export declare interface ConditionalSkill extends BaseSearchIndexerSkill {
544
636
 
545
637
  /** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */
546
638
  export declare interface CorsOptions {
547
- /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single `*` to allow all origins (not recommended). */
639
+ /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */
548
640
  allowedOrigins: string[];
549
641
  /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. */
550
642
  maxAgeInSeconds?: number;
@@ -555,6 +647,11 @@ export declare interface CorsOptions {
555
647
  */
556
648
  export declare type CountDocumentsOptions = OperationOptions;
557
649
 
650
+ /**
651
+ * Options for create alias operation.
652
+ */
653
+ export declare type CreateAliasOptions = OperationOptions;
654
+
558
655
  /**
559
656
  * Options for create datasource operation.
560
657
  */
@@ -570,6 +667,16 @@ export declare type CreateIndexerOptions = OperationOptions;
570
667
  */
571
668
  export declare type CreateIndexOptions = OperationOptions;
572
669
 
670
+ /**
671
+ * Options for create or update alias operation.
672
+ */
673
+ export declare interface CreateOrUpdateAliasOptions extends OperationOptions {
674
+ /**
675
+ * If set to true, Resource will be updated only if the etag matches.
676
+ */
677
+ onlyIfUnchanged?: boolean;
678
+ }
679
+
573
680
  /**
574
681
  * Options for create/update datasource operation.
575
682
  */
@@ -578,6 +685,10 @@ export declare interface CreateorUpdateDataSourceConnectionOptions extends Opera
578
685
  * If set to true, Resource will be updated only if the etag matches.
579
686
  */
580
687
  onlyIfUnchanged?: boolean;
688
+ /**
689
+ * Ignores cache reset requirements.
690
+ */
691
+ skipIndexerResetRequirementForCache?: boolean;
581
692
  }
582
693
 
583
694
  /**
@@ -588,6 +699,10 @@ export declare interface CreateorUpdateIndexerOptions extends OperationOptions {
588
699
  * If set to true, Resource will be updated only if the etag matches.
589
700
  */
590
701
  onlyIfUnchanged?: boolean;
702
+ /** Ignores cache reset requirements. */
703
+ skipIndexerResetRequirementForCache?: boolean;
704
+ /** Disables cache reprocessing change detection. */
705
+ disableCacheReprocessingChangeDetection?: boolean;
591
706
  }
592
707
 
593
708
  /**
@@ -615,6 +730,14 @@ export declare interface CreateOrUpdateSkillsetOptions extends OperationOptions
615
730
  * If set to true, Resource will be updated only if the etag matches.
616
731
  */
617
732
  onlyIfUnchanged?: boolean;
733
+ /**
734
+ * Ignores cache reset requirements.
735
+ */
736
+ skipIndexerResetRequirementForCache?: boolean;
737
+ /**
738
+ * Disables cache reprocessing change detection.
739
+ */
740
+ disableCacheReprocessingChangeDetection?: boolean;
618
741
  }
619
742
 
620
743
  /**
@@ -739,12 +862,42 @@ export declare interface CustomEntityLookupSkill extends BaseSearchIndexerSkill
739
862
  globalDefaultFuzzyEditDistance?: number;
740
863
  }
741
864
 
742
- /**
743
- * Defines supported languages for {@link CustomEntityLookupSkill}
744
- * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with this type
745
- */
746
865
  export declare type CustomEntityLookupSkillLanguage = "da" | "de" | "en" | "es" | "fi" | "fr" | "it" | "ko" | "pt";
747
866
 
867
+ /** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */
868
+ export declare interface CustomNormalizer extends BaseLexicalNormalizer {
869
+ /** Polymorphic discriminator, which specifies the different types this object can be */
870
+ odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
871
+ /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
872
+ tokenFilters?: TokenFilterName[];
873
+ /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
874
+ charFilters?: CharFilterName[];
875
+ }
876
+
877
+ /** Contains the parameters specific to generating vector embeddings via a custom endpoint. */
878
+ export declare type CustomVectorizer = BaseVectorSearchVectorizer & {
879
+ /** Polymorphic discriminator, which specifies the different types this object can be */
880
+ kind: "customWebApi";
881
+ /** Contains the parameters specific to generating vector embeddings via a custom endpoint. */
882
+ customVectorizerParameters?: CustomVectorizerParameters;
883
+ };
884
+
885
+ /** Contains the parameters specific to generating vector embeddings via a custom endpoint. */
886
+ export declare interface CustomVectorizerParameters {
887
+ /** The uri for the Web API. */
888
+ uri?: string;
889
+ /** The headers required to make the http request. */
890
+ httpHeaders?: Record<string, string>;
891
+ /** The method for the http request. */
892
+ httpMethod?: string;
893
+ /** The desired timeout for the request. Default is 30 seconds. */
894
+ timeout?: string;
895
+ /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
896
+ authResourceId?: string;
897
+ /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
898
+ authIdentity?: SearchIndexerDataIdentity;
899
+ }
900
+
748
901
  /**
749
902
  * Contains the possible cases for DataChangeDetectionPolicy.
750
903
  */
@@ -753,7 +906,7 @@ export declare type DataChangeDetectionPolicy = HighWaterMarkChangeDetectionPoli
753
906
  /**
754
907
  * Contains the possible cases for DataDeletionDetectionPolicy.
755
908
  */
756
- export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;
909
+ export declare type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy | NativeBlobSoftDeleteDeletionDetectionPolicy;
757
910
 
758
911
  /**
759
912
  * Default Batch Size
@@ -776,6 +929,16 @@ export declare interface DefaultCognitiveServicesAccount extends BaseCognitiveSe
776
929
  odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
777
930
  }
778
931
 
932
+ /**
933
+ * Options for delete alias operation.
934
+ */
935
+ export declare interface DeleteAliasOptions extends OperationOptions {
936
+ /**
937
+ * If set to true, Resource will be deleted only if the etag matches.
938
+ */
939
+ onlyIfUnchanged?: boolean;
940
+ }
941
+
779
942
  /**
780
943
  * Options for delete datasource operation.
781
944
  */
@@ -863,6 +1026,15 @@ export declare interface DistanceScoringParameters {
863
1026
  boostingDistance: number;
864
1027
  }
865
1028
 
1029
+ /** Contains debugging information that can be used to further explore your search results. */
1030
+ export declare interface DocumentDebugInfo {
1031
+ /**
1032
+ * Contains debugging information specific to semantic search queries.
1033
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1034
+ */
1035
+ readonly semantic?: SemanticDebugInfo;
1036
+ }
1037
+
866
1038
  /** A skill that extracts content from a file within the enrichment pipeline. */
867
1039
  export declare interface DocumentExtractionSkill extends BaseSearchIndexerSkill {
868
1040
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -963,10 +1135,6 @@ export declare interface EntityRecognitionSkill extends BaseSearchIndexerSkill {
963
1135
  minimumPrecision?: number;
964
1136
  }
965
1137
 
966
- /**
967
- * Defines supported languages for {@link EntityRecognitionSkill}
968
- * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with this type
969
- */
970
1138
  export declare type EntityRecognitionSkillLanguage = "ar" | "cs" | "zh-Hans" | "zh-Hant" | "da" | "nl" | "en" | "fi" | "fr" | "de" | "el" | "hu" | "it" | "ja" | "ko" | "no" | "pl" | "pt-PT" | "pt-BR" | "ru" | "es" | "sv" | "tr";
971
1139
 
972
1140
  /** Using the Text Analytics API, extracts entities of different types from text. */
@@ -979,7 +1147,7 @@ export declare interface EntityRecognitionSkillV3 extends BaseSearchIndexerSkill
979
1147
  defaultLanguageCode?: string;
980
1148
  /** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
981
1149
  minimumPrecision?: number;
982
- /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
1150
+ /** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
983
1151
  modelVersion?: string;
984
1152
  }
985
1153
 
@@ -1097,6 +1265,11 @@ export declare class GeographyPoint {
1097
1265
  toJSON(): Record<string, unknown>;
1098
1266
  }
1099
1267
 
1268
+ /**
1269
+ * Options for get alias operation.
1270
+ */
1271
+ export declare type GetAliasOptions = OperationOptions;
1272
+
1100
1273
  /**
1101
1274
  * Options for get datasource operation.
1102
1275
  */
@@ -1225,10 +1398,6 @@ export declare interface ImageAnalysisSkill extends BaseSearchIndexerSkill {
1225
1398
  details?: ImageDetail[];
1226
1399
  }
1227
1400
 
1228
- /**
1229
- * Defines supported languages for {@link ImageAnalysisSkill}
1230
- * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with this type
1231
- */
1232
1401
  export declare type ImageAnalysisSkillLanguage = "ar" | "az" | "bg" | "bs" | "ca" | "cs" | "cy" | "da" | "de" | "el" | "en" | "es" | "et" | "eu" | "fi" | "fr" | "ga" | "gl" | "he" | "hi" | "hr" | "hu" | "id" | "it" | "ja" | "kk" | "ko" | "lt" | "lv" | "mk" | "ms" | "nb" | "nl" | "pl" | "prs" | "pt-BR" | "pt" | "pt-PT" | "ro" | "ru" | "sk" | "sl" | "sr-Cyrl" | "sr-Latn" | "sv" | "th" | "tr" | "uk" | "vi" | "zh" | "zh-Hans" | "zh-Hant";
1233
1402
 
1234
1403
  export declare type ImageDetail = "celebrities" | "landmarks";
@@ -1321,11 +1490,6 @@ export declare interface IndexDocumentsResult {
1321
1490
  readonly results: IndexingResult[];
1322
1491
  }
1323
1492
 
1324
- /**
1325
- * ### Known values supported by the service
1326
- * **standard**: Indicates that Azure Cognitive Search can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value.
1327
- * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.
1328
- */
1329
1493
  export declare type IndexerExecutionEnvironment = "standard" | "private";
1330
1494
 
1331
1495
  /** Represents the result of an individual indexer execution. */
@@ -1335,6 +1499,16 @@ export declare interface IndexerExecutionResult {
1335
1499
  * NOTE: This property will not be serialized. It can only be populated by the server.
1336
1500
  */
1337
1501
  readonly status: IndexerExecutionStatus;
1502
+ /**
1503
+ * The outcome of this indexer execution.
1504
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1505
+ */
1506
+ readonly statusDetail?: IndexerExecutionStatusDetail;
1507
+ /**
1508
+ * All of the state that defines and dictates the indexer's current execution.
1509
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1510
+ */
1511
+ readonly currentState?: IndexerState;
1338
1512
  /**
1339
1513
  * The error message indicating the top-level error, if any.
1340
1514
  * NOTE: This property will not be serialized. It can only be populated by the server.
@@ -1385,9 +1559,67 @@ export declare interface IndexerExecutionResult {
1385
1559
  /** Defines values for IndexerExecutionStatus. */
1386
1560
  export declare type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset";
1387
1561
 
1562
+ /**
1563
+ * Defines values for IndexerExecutionStatusDetail. \
1564
+ * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail,
1565
+ * this enum contains the known values that the service supports.
1566
+ * ### Known values supported by the service
1567
+ * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs.
1568
+ */
1569
+ export declare type IndexerExecutionStatusDetail = string;
1570
+
1571
+ /** Represents all of the state that defines and dictates the indexer's current execution. */
1572
+ export declare interface IndexerState {
1573
+ /**
1574
+ * The mode the indexer is running in.
1575
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1576
+ */
1577
+ readonly mode?: IndexingMode;
1578
+ /**
1579
+ * Change tracking state used when indexing starts on all documents in the datasource.
1580
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1581
+ */
1582
+ readonly allDocumentsInitialChangeTrackingState?: string;
1583
+ /**
1584
+ * Change tracking state value when indexing finishes on all documents in the datasource.
1585
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1586
+ */
1587
+ readonly allDocumentsFinalChangeTrackingState?: string;
1588
+ /**
1589
+ * Change tracking state used when indexing starts on select, reset documents in the datasource.
1590
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1591
+ */
1592
+ readonly resetDocumentsInitialChangeTrackingState?: string;
1593
+ /**
1594
+ * Change tracking state value when indexing finishes on select, reset documents in the datasource.
1595
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1596
+ */
1597
+ readonly resetDocumentsFinalChangeTrackingState?: string;
1598
+ /**
1599
+ * The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys.
1600
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1601
+ */
1602
+ readonly resetDocumentKeys?: string[];
1603
+ /**
1604
+ * The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids.
1605
+ * NOTE: This property will not be serialized. It can only be populated by the server.
1606
+ */
1607
+ readonly resetDatasourceDocumentIds?: string[];
1608
+ }
1609
+
1388
1610
  /** Defines values for IndexerStatus. */
1389
1611
  export declare type IndexerStatus = "unknown" | "error" | "running";
1390
1612
 
1613
+ /**
1614
+ * Defines values for IndexingMode. \
1615
+ * {@link KnownIndexingMode} can be used interchangeably with IndexingMode,
1616
+ * this enum contains the known values that the service supports.
1617
+ * ### Known values supported by the service
1618
+ * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \
1619
+ * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status.
1620
+ */
1621
+ export declare type IndexingMode = string;
1622
+
1391
1623
  /** Represents parameters for indexer execution. */
1392
1624
  export declare interface IndexingParameters {
1393
1625
  /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */
@@ -1484,6 +1716,16 @@ export declare type IndexIterator = PagedAsyncIterableIterator<SearchIndex, Sear
1484
1716
  */
1485
1717
  export declare type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;
1486
1718
 
1719
+ /**
1720
+ * Defines values for IndexProjectionMode. \
1721
+ * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode,
1722
+ * this enum contains the known values that the service supports.
1723
+ * ### Known values supported by the service
1724
+ * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \
1725
+ * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern.
1726
+ */
1727
+ export declare type IndexProjectionMode = string;
1728
+
1487
1729
  /** Input field mapping for a skill. */
1488
1730
  export declare interface InputFieldMappingEntry {
1489
1731
  /** The name of the input. */
@@ -1518,10 +1760,6 @@ export declare interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill
1518
1760
  modelVersion?: string;
1519
1761
  }
1520
1762
 
1521
- /**
1522
- * Defines supported languages for {@link KeyPhraseExtractionSkill}
1523
- * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with this type
1524
- */
1525
1763
  export declare type KeyPhraseExtractionSkillLanguage = "da" | "nl" | "en" | "fi" | "fr" | "de" | "it" | "ja" | "ko" | "no" | "pl" | "pt-PT" | "pt-BR" | "ru" | "es" | "sv";
1526
1764
 
1527
1765
  /** Marks terms as keywords. This token filter is implemented using Apache Lucene. */
@@ -1979,11 +2217,23 @@ export declare enum KnownBlobIndexerPDFTextRotationAlgorithm {
1979
2217
  }
1980
2218
 
1981
2219
  /** Known values of {@link CharFilterName} that the service accepts. */
1982
- export declare enum KnownCharFilterNames {
2220
+ export declare enum KnownCharFilterName {
1983
2221
  /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
1984
2222
  HtmlStrip = "html_strip"
1985
2223
  }
1986
2224
 
2225
+ /**
2226
+ * Defines values for CharFilterName.
2227
+ * @readonly
2228
+ */
2229
+ export declare enum KnownCharFilterNames {
2230
+ /**
2231
+ * A character filter that attempts to strip out HTML constructs. See
2232
+ * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html
2233
+ */
2234
+ HtmlStrip = "html_strip"
2235
+ }
2236
+
1987
2237
  /** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */
1988
2238
  export declare enum KnownCustomEntityLookupSkillLanguage {
1989
2239
  /** Danish */
@@ -2190,6 +2440,36 @@ export declare enum KnownImageDetail {
2190
2440
  Landmarks = "landmarks"
2191
2441
  }
2192
2442
 
2443
+ /** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */
2444
+ export declare enum KnownIndexerExecutionEnvironment {
2445
+ /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
2446
+ Standard = "standard",
2447
+ /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
2448
+ Private = "private"
2449
+ }
2450
+
2451
+ /** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
2452
+ export declare enum KnownIndexerExecutionStatusDetail {
2453
+ /** Indicates that the reset that occurred was for a call to ResetDocs. */
2454
+ ResetDocs = "resetDocs"
2455
+ }
2456
+
2457
+ /** Known values of {@link IndexingMode} that the service accepts. */
2458
+ export declare enum KnownIndexingMode {
2459
+ /** The indexer is indexing all documents in the datasource. */
2460
+ IndexingAllDocs = "indexingAllDocs",
2461
+ /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */
2462
+ IndexingResetDocs = "indexingResetDocs"
2463
+ }
2464
+
2465
+ /** Known values of {@link IndexProjectionMode} that the service accepts. */
2466
+ export declare enum KnownIndexProjectionMode {
2467
+ /** The source document will be skipped from writing into the indexer's target index. */
2468
+ SkipIndexingParentDocuments = "skipIndexingParentDocuments",
2469
+ /** The source document will be written into the indexer's target index. This is the default pattern. */
2470
+ IncludeIndexingParentDocuments = "includeIndexingParentDocuments"
2471
+ }
2472
+
2193
2473
  /** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */
2194
2474
  export declare enum KnownKeyPhraseExtractionSkillLanguage {
2195
2475
  /** Danish */
@@ -2226,6 +2506,254 @@ export declare enum KnownKeyPhraseExtractionSkillLanguage {
2226
2506
  Sv = "sv"
2227
2507
  }
2228
2508
 
2509
+ /** Known values of {@link LexicalAnalyzerName} that the service accepts. */
2510
+ export declare enum KnownLexicalAnalyzerName {
2511
+ /** Microsoft analyzer for Arabic. */
2512
+ ArMicrosoft = "ar.microsoft",
2513
+ /** Lucene analyzer for Arabic. */
2514
+ ArLucene = "ar.lucene",
2515
+ /** Lucene analyzer for Armenian. */
2516
+ HyLucene = "hy.lucene",
2517
+ /** Microsoft analyzer for Bangla. */
2518
+ BnMicrosoft = "bn.microsoft",
2519
+ /** Lucene analyzer for Basque. */
2520
+ EuLucene = "eu.lucene",
2521
+ /** Microsoft analyzer for Bulgarian. */
2522
+ BgMicrosoft = "bg.microsoft",
2523
+ /** Lucene analyzer for Bulgarian. */
2524
+ BgLucene = "bg.lucene",
2525
+ /** Microsoft analyzer for Catalan. */
2526
+ CaMicrosoft = "ca.microsoft",
2527
+ /** Lucene analyzer for Catalan. */
2528
+ CaLucene = "ca.lucene",
2529
+ /** Microsoft analyzer for Chinese (Simplified). */
2530
+ ZhHansMicrosoft = "zh-Hans.microsoft",
2531
+ /** Lucene analyzer for Chinese (Simplified). */
2532
+ ZhHansLucene = "zh-Hans.lucene",
2533
+ /** Microsoft analyzer for Chinese (Traditional). */
2534
+ ZhHantMicrosoft = "zh-Hant.microsoft",
2535
+ /** Lucene analyzer for Chinese (Traditional). */
2536
+ ZhHantLucene = "zh-Hant.lucene",
2537
+ /** Microsoft analyzer for Croatian. */
2538
+ HrMicrosoft = "hr.microsoft",
2539
+ /** Microsoft analyzer for Czech. */
2540
+ CsMicrosoft = "cs.microsoft",
2541
+ /** Lucene analyzer for Czech. */
2542
+ CsLucene = "cs.lucene",
2543
+ /** Microsoft analyzer for Danish. */
2544
+ DaMicrosoft = "da.microsoft",
2545
+ /** Lucene analyzer for Danish. */
2546
+ DaLucene = "da.lucene",
2547
+ /** Microsoft analyzer for Dutch. */
2548
+ NlMicrosoft = "nl.microsoft",
2549
+ /** Lucene analyzer for Dutch. */
2550
+ NlLucene = "nl.lucene",
2551
+ /** Microsoft analyzer for English. */
2552
+ EnMicrosoft = "en.microsoft",
2553
+ /** Lucene analyzer for English. */
2554
+ EnLucene = "en.lucene",
2555
+ /** Microsoft analyzer for Estonian. */
2556
+ EtMicrosoft = "et.microsoft",
2557
+ /** Microsoft analyzer for Finnish. */
2558
+ FiMicrosoft = "fi.microsoft",
2559
+ /** Lucene analyzer for Finnish. */
2560
+ FiLucene = "fi.lucene",
2561
+ /** Microsoft analyzer for French. */
2562
+ FrMicrosoft = "fr.microsoft",
2563
+ /** Lucene analyzer for French. */
2564
+ FrLucene = "fr.lucene",
2565
+ /** Lucene analyzer for Galician. */
2566
+ GlLucene = "gl.lucene",
2567
+ /** Microsoft analyzer for German. */
2568
+ DeMicrosoft = "de.microsoft",
2569
+ /** Lucene analyzer for German. */
2570
+ DeLucene = "de.lucene",
2571
+ /** Microsoft analyzer for Greek. */
2572
+ ElMicrosoft = "el.microsoft",
2573
+ /** Lucene analyzer for Greek. */
2574
+ ElLucene = "el.lucene",
2575
+ /** Microsoft analyzer for Gujarati. */
2576
+ GuMicrosoft = "gu.microsoft",
2577
+ /** Microsoft analyzer for Hebrew. */
2578
+ HeMicrosoft = "he.microsoft",
2579
+ /** Microsoft analyzer for Hindi. */
2580
+ HiMicrosoft = "hi.microsoft",
2581
+ /** Lucene analyzer for Hindi. */
2582
+ HiLucene = "hi.lucene",
2583
+ /** Microsoft analyzer for Hungarian. */
2584
+ HuMicrosoft = "hu.microsoft",
2585
+ /** Lucene analyzer for Hungarian. */
2586
+ HuLucene = "hu.lucene",
2587
+ /** Microsoft analyzer for Icelandic. */
2588
+ IsMicrosoft = "is.microsoft",
2589
+ /** Microsoft analyzer for Indonesian (Bahasa). */
2590
+ IdMicrosoft = "id.microsoft",
2591
+ /** Lucene analyzer for Indonesian. */
2592
+ IdLucene = "id.lucene",
2593
+ /** Lucene analyzer for Irish. */
2594
+ GaLucene = "ga.lucene",
2595
+ /** Microsoft analyzer for Italian. */
2596
+ ItMicrosoft = "it.microsoft",
2597
+ /** Lucene analyzer for Italian. */
2598
+ ItLucene = "it.lucene",
2599
+ /** Microsoft analyzer for Japanese. */
2600
+ JaMicrosoft = "ja.microsoft",
2601
+ /** Lucene analyzer for Japanese. */
2602
+ JaLucene = "ja.lucene",
2603
+ /** Microsoft analyzer for Kannada. */
2604
+ KnMicrosoft = "kn.microsoft",
2605
+ /** Microsoft analyzer for Korean. */
2606
+ KoMicrosoft = "ko.microsoft",
2607
+ /** Lucene analyzer for Korean. */
2608
+ KoLucene = "ko.lucene",
2609
+ /** Microsoft analyzer for Latvian. */
2610
+ LvMicrosoft = "lv.microsoft",
2611
+ /** Lucene analyzer for Latvian. */
2612
+ LvLucene = "lv.lucene",
2613
+ /** Microsoft analyzer for Lithuanian. */
2614
+ LtMicrosoft = "lt.microsoft",
2615
+ /** Microsoft analyzer for Malayalam. */
2616
+ MlMicrosoft = "ml.microsoft",
2617
+ /** Microsoft analyzer for Malay (Latin). */
2618
+ MsMicrosoft = "ms.microsoft",
2619
+ /** Microsoft analyzer for Marathi. */
2620
+ MrMicrosoft = "mr.microsoft",
2621
+ /** Microsoft analyzer for Norwegian (Bokmål). */
2622
+ NbMicrosoft = "nb.microsoft",
2623
+ /** Lucene analyzer for Norwegian. */
2624
+ NoLucene = "no.lucene",
2625
+ /** Lucene analyzer for Persian. */
2626
+ FaLucene = "fa.lucene",
2627
+ /** Microsoft analyzer for Polish. */
2628
+ PlMicrosoft = "pl.microsoft",
2629
+ /** Lucene analyzer for Polish. */
2630
+ PlLucene = "pl.lucene",
2631
+ /** Microsoft analyzer for Portuguese (Brazil). */
2632
+ PtBrMicrosoft = "pt-BR.microsoft",
2633
+ /** Lucene analyzer for Portuguese (Brazil). */
2634
+ PtBrLucene = "pt-BR.lucene",
2635
+ /** Microsoft analyzer for Portuguese (Portugal). */
2636
+ PtPtMicrosoft = "pt-PT.microsoft",
2637
+ /** Lucene analyzer for Portuguese (Portugal). */
2638
+ PtPtLucene = "pt-PT.lucene",
2639
+ /** Microsoft analyzer for Punjabi. */
2640
+ PaMicrosoft = "pa.microsoft",
2641
+ /** Microsoft analyzer for Romanian. */
2642
+ RoMicrosoft = "ro.microsoft",
2643
+ /** Lucene analyzer for Romanian. */
2644
+ RoLucene = "ro.lucene",
2645
+ /** Microsoft analyzer for Russian. */
2646
+ RuMicrosoft = "ru.microsoft",
2647
+ /** Lucene analyzer for Russian. */
2648
+ RuLucene = "ru.lucene",
2649
+ /** Microsoft analyzer for Serbian (Cyrillic). */
2650
+ SrCyrillicMicrosoft = "sr-cyrillic.microsoft",
2651
+ /** Microsoft analyzer for Serbian (Latin). */
2652
+ SrLatinMicrosoft = "sr-latin.microsoft",
2653
+ /** Microsoft analyzer for Slovak. */
2654
+ SkMicrosoft = "sk.microsoft",
2655
+ /** Microsoft analyzer for Slovenian. */
2656
+ SlMicrosoft = "sl.microsoft",
2657
+ /** Microsoft analyzer for Spanish. */
2658
+ EsMicrosoft = "es.microsoft",
2659
+ /** Lucene analyzer for Spanish. */
2660
+ EsLucene = "es.lucene",
2661
+ /** Microsoft analyzer for Swedish. */
2662
+ SvMicrosoft = "sv.microsoft",
2663
+ /** Lucene analyzer for Swedish. */
2664
+ SvLucene = "sv.lucene",
2665
+ /** Microsoft analyzer for Tamil. */
2666
+ TaMicrosoft = "ta.microsoft",
2667
+ /** Microsoft analyzer for Telugu. */
2668
+ TeMicrosoft = "te.microsoft",
2669
+ /** Microsoft analyzer for Thai. */
2670
+ ThMicrosoft = "th.microsoft",
2671
+ /** Lucene analyzer for Thai. */
2672
+ ThLucene = "th.lucene",
2673
+ /** Microsoft analyzer for Turkish. */
2674
+ TrMicrosoft = "tr.microsoft",
2675
+ /** Lucene analyzer for Turkish. */
2676
+ TrLucene = "tr.lucene",
2677
+ /** Microsoft analyzer for Ukrainian. */
2678
+ UkMicrosoft = "uk.microsoft",
2679
+ /** Microsoft analyzer for Urdu. */
2680
+ UrMicrosoft = "ur.microsoft",
2681
+ /** Microsoft analyzer for Vietnamese. */
2682
+ ViMicrosoft = "vi.microsoft",
2683
+ /** Standard Lucene analyzer. */
2684
+ StandardLucene = "standard.lucene",
2685
+ /** Standard ASCII Folding Lucene analyzer. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
2686
+ StandardAsciiFoldingLucene = "standardasciifolding.lucene",
2687
+ /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
2688
+ Keyword = "keyword",
2689
+ /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
2690
+ Pattern = "pattern",
2691
+ /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
2692
+ Simple = "simple",
2693
+ /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
2694
+ Stop = "stop",
2695
+ /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
2696
+ Whitespace = "whitespace"
2697
+ }
2698
+
2699
+ /** Known values of {@link LexicalNormalizerName} that the service accepts. */
2700
+ declare enum KnownLexicalNormalizerName {
2701
+ /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
2702
+ AsciiFolding = "asciifolding",
2703
+ /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
2704
+ Elision = "elision",
2705
+ /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
2706
+ Lowercase = "lowercase",
2707
+ /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
2708
+ Standard = "standard",
2709
+ /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
2710
+ Uppercase = "uppercase"
2711
+ }
2712
+ export { KnownLexicalNormalizerName }
2713
+ export { KnownLexicalNormalizerName as KnownNormalizerNames }
2714
+
2715
+ /** Known values of {@link LexicalTokenizerName} that the service accepts. */
2716
+ export declare enum KnownLexicalTokenizerName {
2717
+ /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
2718
+ Classic = "classic",
2719
+ /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
2720
+ EdgeNGram = "edgeNGram",
2721
+ /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
2722
+ Keyword = "keyword_v2",
2723
+ /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
2724
+ Letter = "letter",
2725
+ /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
2726
+ Lowercase = "lowercase",
2727
+ /** Divides text using language-specific rules. */
2728
+ MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
2729
+ /** Divides text using language-specific rules and reduces words to their base forms. */
2730
+ MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
2731
+ /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
2732
+ NGram = "nGram",
2733
+ /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
2734
+ PathHierarchy = "path_hierarchy_v2",
2735
+ /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
2736
+ Pattern = "pattern",
2737
+ /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
2738
+ Standard = "standard_v2",
2739
+ /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
2740
+ UaxUrlEmail = "uax_url_email",
2741
+ /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
2742
+ Whitespace = "whitespace"
2743
+ }
2744
+
2745
+ /** Known values of {@link LineEnding} that the service accepts. */
2746
+ export declare enum KnownLineEnding {
2747
+ /** Lines are separated by a single space character. */
2748
+ Space = "space",
2749
+ /** Lines are separated by a carriage return ('\r') character. */
2750
+ CarriageReturn = "carriageReturn",
2751
+ /** Lines are separated by a single line feed ('\n') character. */
2752
+ LineFeed = "lineFeed",
2753
+ /** Lines are separated by a carriage return and a line feed ('\r\n') character. */
2754
+ CarriageReturnLineFeed = "carriageReturnLineFeed"
2755
+ }
2756
+
2229
2757
  /** Known values of {@link OcrSkillLanguage} that the service accepts. */
2230
2758
  export declare enum KnownOcrSkillLanguage {
2231
2759
  /** Afrikaans */
@@ -2570,6 +3098,178 @@ export declare enum KnownOcrSkillLanguage {
2570
3098
  Unk = "unk"
2571
3099
  }
2572
3100
 
3101
+ /** Known values of {@link PIIDetectionSkillMaskingMode} that the service accepts. */
3102
+ export declare enum KnownPIIDetectionSkillMaskingMode {
3103
+ /** No masking occurs and the maskedText output will not be returned. */
3104
+ None = "none",
3105
+ /** Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. */
3106
+ Replace = "replace"
3107
+ }
3108
+
3109
+ /** Known values of {@link QueryDebugMode} that the service accepts. */
3110
+ export declare enum KnownQueryDebugMode {
3111
+ /** No query debugging information will be returned. */
3112
+ Disabled = "disabled",
3113
+ /** Allows the user to further explore their reranked results. */
3114
+ Semantic = "semantic"
3115
+ }
3116
+
3117
+ /** Known values of {@link QueryLanguage} that the service accepts. */
3118
+ export declare enum KnownQueryLanguage {
3119
+ /** Query language not specified. */
3120
+ None = "none",
3121
+ /** Query language value for English (United States). */
3122
+ EnUs = "en-us",
3123
+ /** Query language value for English (Great Britain). */
3124
+ EnGb = "en-gb",
3125
+ /** Query language value for English (India). */
3126
+ EnIn = "en-in",
3127
+ /** Query language value for English (Canada). */
3128
+ EnCa = "en-ca",
3129
+ /** Query language value for English (Australia). */
3130
+ EnAu = "en-au",
3131
+ /** Query language value for French (France). */
3132
+ FrFr = "fr-fr",
3133
+ /** Query language value for French (Canada). */
3134
+ FrCa = "fr-ca",
3135
+ /** Query language value for German (Germany). */
3136
+ DeDe = "de-de",
3137
+ /** Query language value for Spanish (Spain). */
3138
+ EsEs = "es-es",
3139
+ /** Query language value for Spanish (Mexico). */
3140
+ EsMx = "es-mx",
3141
+ /** Query language value for Chinese (China). */
3142
+ ZhCn = "zh-cn",
3143
+ /** Query language value for Chinese (Taiwan). */
3144
+ ZhTw = "zh-tw",
3145
+ /** Query language value for Portuguese (Brazil). */
3146
+ PtBr = "pt-br",
3147
+ /** Query language value for Portuguese (Portugal). */
3148
+ PtPt = "pt-pt",
3149
+ /** Query language value for Italian (Italy). */
3150
+ ItIt = "it-it",
3151
+ /** Query language value for Japanese (Japan). */
3152
+ JaJp = "ja-jp",
3153
+ /** Query language value for Korean (Korea). */
3154
+ KoKr = "ko-kr",
3155
+ /** Query language value for Russian (Russia). */
3156
+ RuRu = "ru-ru",
3157
+ /** Query language value for Czech (Czech Republic). */
3158
+ CsCz = "cs-cz",
3159
+ /** Query language value for Dutch (Belgium). */
3160
+ NlBe = "nl-be",
3161
+ /** Query language value for Dutch (Netherlands). */
3162
+ NlNl = "nl-nl",
3163
+ /** Query language value for Hungarian (Hungary). */
3164
+ HuHu = "hu-hu",
3165
+ /** Query language value for Polish (Poland). */
3166
+ PlPl = "pl-pl",
3167
+ /** Query language value for Swedish (Sweden). */
3168
+ SvSe = "sv-se",
3169
+ /** Query language value for Turkish (Turkey). */
3170
+ TrTr = "tr-tr",
3171
+ /** Query language value for Hindi (India). */
3172
+ HiIn = "hi-in",
3173
+ /** Query language value for Arabic (Saudi Arabia). */
3174
+ ArSa = "ar-sa",
3175
+ /** Query language value for Arabic (Egypt). */
3176
+ ArEg = "ar-eg",
3177
+ /** Query language value for Arabic (Morocco). */
3178
+ ArMa = "ar-ma",
3179
+ /** Query language value for Arabic (Kuwait). */
3180
+ ArKw = "ar-kw",
3181
+ /** Query language value for Arabic (Jordan). */
3182
+ ArJo = "ar-jo",
3183
+ /** Query language value for Danish (Denmark). */
3184
+ DaDk = "da-dk",
3185
+ /** Query language value for Norwegian (Norway). */
3186
+ NoNo = "no-no",
3187
+ /** Query language value for Bulgarian (Bulgaria). */
3188
+ BgBg = "bg-bg",
3189
+ /** Query language value for Croatian (Croatia). */
3190
+ HrHr = "hr-hr",
3191
+ /** Query language value for Croatian (Bosnia and Herzegovina). */
3192
+ HrBa = "hr-ba",
3193
+ /** Query language value for Malay (Malaysia). */
3194
+ MsMy = "ms-my",
3195
+ /** Query language value for Malay (Brunei Darussalam). */
3196
+ MsBn = "ms-bn",
3197
+ /** Query language value for Slovenian (Slovenia). */
3198
+ SlSl = "sl-sl",
3199
+ /** Query language value for Tamil (India). */
3200
+ TaIn = "ta-in",
3201
+ /** Query language value for Vietnamese (Viet Nam). */
3202
+ ViVn = "vi-vn",
3203
+ /** Query language value for Greek (Greece). */
3204
+ ElGr = "el-gr",
3205
+ /** Query language value for Romanian (Romania). */
3206
+ RoRo = "ro-ro",
3207
+ /** Query language value for Icelandic (Iceland). */
3208
+ IsIs = "is-is",
3209
+ /** Query language value for Indonesian (Indonesia). */
3210
+ IdId = "id-id",
3211
+ /** Query language value for Thai (Thailand). */
3212
+ ThTh = "th-th",
3213
+ /** Query language value for Lithuanian (Lithuania). */
3214
+ LtLt = "lt-lt",
3215
+ /** Query language value for Ukrainian (Ukraine). */
3216
+ UkUa = "uk-ua",
3217
+ /** Query language value for Latvian (Latvia). */
3218
+ LvLv = "lv-lv",
3219
+ /** Query language value for Estonian (Estonia). */
3220
+ EtEe = "et-ee",
3221
+ /** Query language value for Catalan. */
3222
+ CaEs = "ca-es",
3223
+ /** Query language value for Finnish (Finland). */
3224
+ FiFi = "fi-fi",
3225
+ /** Query language value for Serbian (Bosnia and Herzegovina). */
3226
+ SrBa = "sr-ba",
3227
+ /** Query language value for Serbian (Montenegro). */
3228
+ SrMe = "sr-me",
3229
+ /** Query language value for Serbian (Serbia). */
3230
+ SrRs = "sr-rs",
3231
+ /** Query language value for Slovak (Slovakia). */
3232
+ SkSk = "sk-sk",
3233
+ /** Query language value for Norwegian (Norway). */
3234
+ NbNo = "nb-no",
3235
+ /** Query language value for Armenian (Armenia). */
3236
+ HyAm = "hy-am",
3237
+ /** Query language value for Bengali (India). */
3238
+ BnIn = "bn-in",
3239
+ /** Query language value for Basque. */
3240
+ EuEs = "eu-es",
3241
+ /** Query language value for Galician. */
3242
+ GlEs = "gl-es",
3243
+ /** Query language value for Gujarati (India). */
3244
+ GuIn = "gu-in",
3245
+ /** Query language value for Hebrew (Israel). */
3246
+ HeIl = "he-il",
3247
+ /** Query language value for Irish (Ireland). */
3248
+ GaIe = "ga-ie",
3249
+ /** Query language value for Kannada (India). */
3250
+ KnIn = "kn-in",
3251
+ /** Query language value for Malayalam (India). */
3252
+ MlIn = "ml-in",
3253
+ /** Query language value for Marathi (India). */
3254
+ MrIn = "mr-in",
3255
+ /** Query language value for Persian (U.A.E.). */
3256
+ FaAe = "fa-ae",
3257
+ /** Query language value for Punjabi (India). */
3258
+ PaIn = "pa-in",
3259
+ /** Query language value for Telugu (India). */
3260
+ TeIn = "te-in",
3261
+ /** Query language value for Urdu (Pakistan). */
3262
+ UrPk = "ur-pk"
3263
+ }
3264
+
3265
+ /** Known values of {@link QuerySpellerType} that the service accepts. */
3266
+ export declare enum KnownQuerySpellerType {
3267
+ /** Speller not enabled. */
3268
+ None = "none",
3269
+ /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
3270
+ Lexicon = "lexicon"
3271
+ }
3272
+
2573
3273
  /** Known values of {@link RegexFlags} that the service accepts. */
2574
3274
  export declare enum KnownRegexFlags {
2575
3275
  /** Enables canonical equivalence. */
@@ -2624,6 +3324,42 @@ export declare enum KnownSearchIndexerDataSourceType {
2624
3324
  AdlsGen2 = "adlsgen2"
2625
3325
  }
2626
3326
 
3327
+ /** Known values of {@link SemanticErrorMode} that the service accepts. */
3328
+ export declare enum KnownSemanticErrorMode {
3329
+ /** If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. */
3330
+ Partial = "partial",
3331
+ /** If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error. */
3332
+ Fail = "fail"
3333
+ }
3334
+
3335
+ /** Known values of {@link SemanticErrorReason} that the service accepts. */
3336
+ export declare enum KnownSemanticErrorReason {
3337
+ /** If 'semanticMaxWaitInMilliseconds' was set and the semantic processing duration exceeded that value. Only the base results were returned. */
3338
+ MaxWaitExceeded = "maxWaitExceeded",
3339
+ /** The request was throttled. Only the base results were returned. */
3340
+ CapacityOverloaded = "capacityOverloaded",
3341
+ /** At least one step of the semantic process failed. */
3342
+ Transient = "transient"
3343
+ }
3344
+
3345
+ /** Known values of {@link SemanticFieldState} that the service accepts. */
3346
+ export declare enum KnownSemanticFieldState {
3347
+ /** The field was fully used for semantic enrichment. */
3348
+ Used = "used",
3349
+ /** The field was not used for semantic enrichment. */
3350
+ Unused = "unused",
3351
+ /** The field was partially used for semantic enrichment. */
3352
+ Partial = "partial"
3353
+ }
3354
+
3355
+ /** Known values of {@link SemanticSearchResultsType} that the service accepts. */
3356
+ export declare enum KnownSemanticSearchResultsType {
3357
+ /** Results without any semantic enrichment or reranking. */
3358
+ BaseResults = "baseResults",
3359
+ /** Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights. */
3360
+ RerankedResults = "rerankedResults"
3361
+ }
3362
+
2627
3363
  /** Known values of {@link SentimentSkillLanguage} that the service accepts. */
2628
3364
  export declare enum KnownSentimentSkillLanguage {
2629
3365
  /** Danish */
@@ -2658,6 +3394,14 @@ export declare enum KnownSentimentSkillLanguage {
2658
3394
  Tr = "tr"
2659
3395
  }
2660
3396
 
3397
+ /** Known values of {@link Speller} that the service accepts. */
3398
+ export declare enum KnownSpeller {
3399
+ /** Speller not enabled. */
3400
+ None = "none",
3401
+ /** Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter. */
3402
+ Lexicon = "lexicon"
3403
+ }
3404
+
2661
3405
  /** Known values of {@link SplitSkillLanguage} that the service accepts. */
2662
3406
  export declare enum KnownSplitSkillLanguage {
2663
3407
  /** Amharic */
@@ -2802,6 +3546,10 @@ export declare enum KnownTextTranslationSkillLanguage {
2802
3546
  Sw = "sw",
2803
3547
  /** Klingon */
2804
3548
  Tlh = "tlh",
3549
+ /** Klingon (Latin script) */
3550
+ TlhLatn = "tlh-Latn",
3551
+ /** Klingon (Klingon script) */
3552
+ TlhPiqd = "tlh-Piqd",
2805
3553
  /** Korean */
2806
3554
  Ko = "ko",
2807
3555
  /** Latvian */
@@ -2822,6 +3570,10 @@ export declare enum KnownTextTranslationSkillLanguage {
2822
3570
  Pl = "pl",
2823
3571
  /** Portuguese */
2824
3572
  Pt = "pt",
3573
+ /** Portuguese (Brazil) */
3574
+ PtBr = "pt-br",
3575
+ /** Portuguese (Portugal) */
3576
+ PtPT = "pt-PT",
2825
3577
  /** Queretaro Otomi */
2826
3578
  Otq = "otq",
2827
3579
  /** Romanian */
@@ -2863,11 +3615,21 @@ export declare enum KnownTextTranslationSkillLanguage {
2863
3615
  /** Welsh */
2864
3616
  Cy = "cy",
2865
3617
  /** Yucatec Maya */
2866
- Yua = "yua"
3618
+ Yua = "yua",
3619
+ /** Irish */
3620
+ Ga = "ga",
3621
+ /** Kannada */
3622
+ Kn = "kn",
3623
+ /** Maori */
3624
+ Mi = "mi",
3625
+ /** Malayalam */
3626
+ Ml = "ml",
3627
+ /** Punjabi */
3628
+ Pa = "pa"
2867
3629
  }
2868
3630
 
2869
3631
  /** Known values of {@link TokenFilterName} that the service accepts. */
2870
- export declare enum KnownTokenFilterNames {
3632
+ export declare enum KnownTokenFilterName {
2871
3633
  /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
2872
3634
  ArabicNormalization = "arabic_normalization",
2873
3635
  /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
@@ -2900,7 +3662,7 @@ export declare enum KnownTokenFilterNames {
2900
3662
  Length = "length",
2901
3663
  /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
2902
3664
  Limit = "limit",
2903
- /** Normalizes token text to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.htm */
3665
+ /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
2904
3666
  Lowercase = "lowercase",
2905
3667
  /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
2906
3668
  NGram = "nGram_v2",
@@ -2932,42 +3694,294 @@ export declare enum KnownTokenFilterNames {
2932
3694
  Truncate = "truncate",
2933
3695
  /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
2934
3696
  Unique = "unique",
2935
- /** Normalizes token text to upper case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
3697
+ /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
2936
3698
  Uppercase = "uppercase",
2937
3699
  /** Splits words into subwords and performs optional transformations on subword groups. */
2938
3700
  WordDelimiter = "word_delimiter"
2939
3701
  }
2940
3702
 
2941
- /** Known values of {@link LexicalTokenizerName} that the service accepts. */
3703
+ /**
3704
+ * Defines values for TokenFilterName.
3705
+ * @readonly
3706
+ */
3707
+ export declare enum KnownTokenFilterNames {
3708
+ /**
3709
+ * A token filter that applies the Arabic normalizer to normalize the orthography. See
3710
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html
3711
+ */
3712
+ ArabicNormalization = "arabic_normalization",
3713
+ /**
3714
+ * Strips all characters after an apostrophe (including the apostrophe itself). See
3715
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html
3716
+ */
3717
+ Apostrophe = "apostrophe",
3718
+ /**
3719
+ * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127
3720
+ * ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such
3721
+ * equivalents exist. See
3722
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html
3723
+ */
3724
+ AsciiFolding = "asciifolding",
3725
+ /**
3726
+ * Forms bigrams of CJK terms that are generated from StandardTokenizer. See
3727
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html
3728
+ */
3729
+ CjkBigram = "cjk_bigram",
3730
+ /**
3731
+ * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic
3732
+ * Latin, and half-width Katakana variants into the equivalent Kana. See
3733
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html
3734
+ */
3735
+ CjkWidth = "cjk_width",
3736
+ /**
3737
+ * Removes English possessives, and dots from acronyms. See
3738
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html
3739
+ */
3740
+ Classic = "classic",
3741
+ /**
3742
+ * Construct bigrams for frequently occurring terms while indexing. Single terms are still
3743
+ * indexed too, with bigrams overlaid. See
3744
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html
3745
+ */
3746
+ CommonGram = "common_grams",
3747
+ /**
3748
+ * Generates n-grams of the given size(s) starting from the front or the back of an input token.
3749
+ * See
3750
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html
3751
+ */
3752
+ EdgeNGram = "edgeNGram_v2",
3753
+ /**
3754
+ * Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See
3755
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html
3756
+ */
3757
+ Elision = "elision",
3758
+ /**
3759
+ * Normalizes German characters according to the heuristics of the German2 snowball algorithm.
3760
+ * See
3761
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html
3762
+ */
3763
+ GermanNormalization = "german_normalization",
3764
+ /**
3765
+ * Normalizes text in Hindi to remove some differences in spelling variations. See
3766
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html
3767
+ */
3768
+ HindiNormalization = "hindi_normalization",
3769
+ /**
3770
+ * Normalizes the Unicode representation of text in Indian languages. See
3771
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html
3772
+ */
3773
+ IndicNormalization = "indic_normalization",
3774
+ /**
3775
+ * Emits each incoming token twice, once as keyword and once as non-keyword. See
3776
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html
3777
+ */
3778
+ KeywordRepeat = "keyword_repeat",
3779
+ /**
3780
+ * A high-performance kstem filter for English. See
3781
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html
3782
+ */
3783
+ KStem = "kstem",
3784
+ /**
3785
+ * Removes words that are too long or too short. See
3786
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html
3787
+ */
3788
+ Length = "length",
3789
+ /**
3790
+ * Limits the number of tokens while indexing. See
3791
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html
3792
+ */
3793
+ Limit = "limit",
3794
+ /**
3795
+ * Normalizes token text to lower case. See
3796
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html
3797
+ */
3798
+ Lowercase = "lowercase",
3799
+ /**
3800
+ * Generates n-grams of the given size(s). See
3801
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html
3802
+ */
3803
+ NGram = "nGram_v2",
3804
+ /**
3805
+ * Applies normalization for Persian. See
3806
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html
3807
+ */
3808
+ PersianNormalization = "persian_normalization",
3809
+ /**
3810
+ * Create tokens for phonetic matches. See
3811
+ * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html
3812
+ */
3813
+ Phonetic = "phonetic",
3814
+ /**
3815
+ * Uses the Porter stemming algorithm to transform the token stream. See
3816
+ * http://tartarus.org/~martin/PorterStemmer
3817
+ */
3818
+ PorterStem = "porter_stem",
3819
+ /**
3820
+ * Reverses the token string. See
3821
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html
3822
+ */
3823
+ Reverse = "reverse",
3824
+ /**
3825
+ * Normalizes use of the interchangeable Scandinavian characters. See
3826
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html
3827
+ */
3828
+ ScandinavianNormalization = "scandinavian_normalization",
3829
+ /**
3830
+ * Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use
3831
+ * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See
3832
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html
3833
+ */
3834
+ ScandinavianFoldingNormalization = "scandinavian_folding",
3835
+ /**
3836
+ * Creates combinations of tokens as a single token. See
3837
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html
3838
+ */
3839
+ Shingle = "shingle",
3840
+ /**
3841
+ * A filter that stems words using a Snowball-generated stemmer. See
3842
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html
3843
+ */
3844
+ Snowball = "snowball",
3845
+ /**
3846
+ * Normalizes the Unicode representation of Sorani text. See
3847
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html
3848
+ */
3849
+ SoraniNormalization = "sorani_normalization",
3850
+ /**
3851
+ * Language specific stemming filter. See
3852
+ * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters
3853
+ */
3854
+ Stemmer = "stemmer",
3855
+ /**
3856
+ * Removes stop words from a token stream. See
3857
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html
3858
+ */
3859
+ Stopwords = "stopwords",
3860
+ /**
3861
+ * Trims leading and trailing whitespace from tokens. See
3862
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html
3863
+ */
3864
+ Trim = "trim",
3865
+ /**
3866
+ * Truncates the terms to a specific length. See
3867
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html
3868
+ */
3869
+ Truncate = "truncate",
3870
+ /**
3871
+ * Filters out tokens with same text as the previous token. See
3872
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html
3873
+ */
3874
+ Unique = "unique",
3875
+ /**
3876
+ * Normalizes token text to upper case. See
3877
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html
3878
+ */
3879
+ Uppercase = "uppercase",
3880
+ /**
3881
+ * Splits words into subwords and performs optional transformations on subword groups.
3882
+ */
3883
+ WordDelimiter = "word_delimiter"
3884
+ }
3885
+
3886
+ /**
3887
+ * Defines values for TokenizerName.
3888
+ * @readonly
3889
+ */
2942
3890
  export declare enum KnownTokenizerNames {
2943
- /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
3891
+ /**
3892
+ * Grammar-based tokenizer that is suitable for processing most European-language documents. See
3893
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html
3894
+ */
2944
3895
  Classic = "classic",
2945
- /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
3896
+ /**
3897
+ * Tokenizes the input from an edge into n-grams of the given size(s). See
3898
+ * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html
3899
+ */
2946
3900
  EdgeNGram = "edgeNGram",
2947
- /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
3901
+ /**
3902
+ * Emits the entire input as a single token. See
3903
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html
3904
+ */
2948
3905
  Keyword = "keyword_v2",
2949
- /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
3906
+ /**
3907
+ * Divides text at non-letters. See
3908
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html
3909
+ */
2950
3910
  Letter = "letter",
2951
- /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
3911
+ /**
3912
+ * Divides text at non-letters and converts them to lower case. See
3913
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html
3914
+ */
2952
3915
  Lowercase = "lowercase",
2953
- /** Divides text using language-specific rules. */
3916
+ /**
3917
+ * Divides text using language-specific rules.
3918
+ */
2954
3919
  MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
2955
- /** Divides text using language-specific rules and reduces words to their base forms. */
3920
+ /**
3921
+ * Divides text using language-specific rules and reduces words to their base forms.
3922
+ */
2956
3923
  MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
2957
- /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
3924
+ /**
3925
+ * Tokenizes the input into n-grams of the given size(s). See
3926
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html
3927
+ */
2958
3928
  NGram = "nGram",
2959
- /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
3929
+ /**
3930
+ * Tokenizer for path-like hierarchies. See
3931
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html
3932
+ */
2960
3933
  PathHierarchy = "path_hierarchy_v2",
2961
- /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
3934
+ /**
3935
+ * Tokenizer that uses regex pattern matching to construct distinct tokens. See
3936
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html
3937
+ */
2962
3938
  Pattern = "pattern",
2963
- /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
3939
+ /**
3940
+ * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop
3941
+ * filter. See
3942
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html
3943
+ */
2964
3944
  Standard = "standard_v2",
2965
- /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
3945
+ /**
3946
+ * Tokenizes urls and emails as one token. See
3947
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html
3948
+ */
2966
3949
  UaxUrlEmail = "uax_url_email",
2967
- /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
3950
+ /**
3951
+ * Divides text at whitespace. See
3952
+ * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html
3953
+ */
2968
3954
  Whitespace = "whitespace"
2969
3955
  }
2970
3956
 
3957
+ /** Known values of {@link VectorQueryKind} that the service accepts. */
3958
+ export declare enum KnownVectorQueryKind {
3959
+ /** Vector query where a raw vector value is provided. */
3960
+ Vector = "vector",
3961
+ /** Vector query where a text value that needs to be vectorized is provided. */
3962
+ $DO_NOT_NORMALIZE$_text = "text"
3963
+ }
3964
+
3965
+ /** Known values of {@link VectorSearchCompressionKind} that the service accepts. */
3966
+ export declare enum KnownVectorSearchCompressionKind {
3967
+ /** Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */
3968
+ ScalarQuantization = "scalarQuantization"
3969
+ }
3970
+
3971
+ /** Known values of {@link VectorSearchCompressionTargetDataType} that the service accepts. */
3972
+ export declare enum KnownVectorSearchCompressionTargetDataType {
3973
+ /** Int8 */
3974
+ Int8 = "int8"
3975
+ }
3976
+
3977
+ /** Known values of {@link VectorSearchVectorizerKind} that the service accepts. */
3978
+ export declare enum KnownVectorSearchVectorizerKind {
3979
+ /** Generate embeddings using an Azure OpenAI resource at query time. */
3980
+ AzureOpenAI = "azureOpenAI",
3981
+ /** Generate embeddings using a custom web endpoint at query time. */
3982
+ CustomWebApi = "customWebApi"
3983
+ }
3984
+
2971
3985
  /** Known values of {@link VisualFeature} that the service accepts. */
2972
3986
  export declare enum KnownVisualFeature {
2973
3987
  /** Visual features recognized as adult persons. */
@@ -3112,6 +4126,24 @@ export declare type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneS
3112
4126
  */
3113
4127
  export declare type LexicalAnalyzerName = string;
3114
4128
 
4129
+ /**
4130
+ * Contains the possible cases for LexicalNormalizer.
4131
+ */
4132
+ export declare type LexicalNormalizer = CustomNormalizer;
4133
+
4134
+ /**
4135
+ * Defines values for LexicalNormalizerName. \
4136
+ * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,
4137
+ * this enum contains the known values that the service supports.
4138
+ * ### Known values supported by the service
4139
+ * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html \
4140
+ * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html \
4141
+ * **lowercase**: Normalizes token text to lowercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
4142
+ * **standard**: Standard normalizer, which consists of lowercase and asciifolding. \
4143
+ * **uppercase**: Normalizes token text to uppercase. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html
4144
+ */
4145
+ export declare type LexicalNormalizerName = string;
4146
+
3115
4147
  /**
3116
4148
  * Contains the possible cases for Tokenizer.
3117
4149
  */
@@ -3148,6 +4180,23 @@ export declare interface LimitTokenFilter extends BaseTokenFilter {
3148
4180
  consumeAllTokens?: boolean;
3149
4181
  }
3150
4182
 
4183
+ /**
4184
+ * Defines values for LineEnding. \
4185
+ * {@link KnownLineEnding} can be used interchangeably with LineEnding,
4186
+ * this enum contains the known values that the service supports.
4187
+ * ### Known values supported by the service
4188
+ * **space**: Lines are separated by a single space character. \
4189
+ * **carriageReturn**: Lines are separated by a carriage return ('\r') character. \
4190
+ * **lineFeed**: Lines are separated by a single line feed ('\n') character. \
4191
+ * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\r\n') character.
4192
+ */
4193
+ export declare type LineEnding = string;
4194
+
4195
+ /**
4196
+ * Options for list aliases operation.
4197
+ */
4198
+ export declare type ListAliasesOptions = OperationOptions;
4199
+
3151
4200
  /**
3152
4201
  * Options for a list data sources operation.
3153
4202
  */
@@ -3297,6 +4346,12 @@ export declare type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catal
3297
4346
  */
3298
4347
  export declare type NarrowedModel<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends unknown ? true : false ? TModel : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? never : (<T>() => T extends TFields ? true : false) extends <T>() => T extends SelectFields<TModel> ? true : false ? TModel : SearchPick<TModel, TFields>;
3299
4348
 
4349
+ /** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
4350
+ export declare interface NativeBlobSoftDeleteDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy {
4351
+ /** Polymorphic discriminator, which specifies the different types this object can be */
4352
+ odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
4353
+ }
4354
+
3300
4355
  /**
3301
4356
  * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.
3302
4357
  */
@@ -3344,10 +4399,6 @@ export declare interface OcrSkill extends BaseSearchIndexerSkill {
3344
4399
  shouldDetectOrientation?: boolean;
3345
4400
  }
3346
4401
 
3347
- /**
3348
- * Defines supported languages for {@link OcrSkill}
3349
- * {@link KnownOcrSkillLanguage} can be used interchangeably with this type
3350
- */
3351
4402
  export declare type OcrSkillLanguage = "af" | "sq" | "anp" | "ar" | "ast" | "awa" | "az" | "bfy" | "eu" | "be" | "be-cyrl" | "be-latn" | "bho" | "bi" | "brx" | "bs" | "bra" | "br" | "bg" | "bns" | "bua" | "ca" | "ceb" | "rab" | "ch" | "hne" | "zh-Hans" | "zh-Hant" | "kw" | "co" | "crh" | "hr" | "cs" | "da" | "prs" | "dhi" | "doi" | "nl" | "en" | "myv" | "et" | "fo" | "fj" | "fil" | "fi" | "fr" | "fur" | "gag" | "gl" | "de" | "gil" | "gon" | "el" | "kl" | "gvr" | "ht" | "hlb" | "hni" | "bgc" | "haw" | "hi" | "mww" | "hoc" | "hu" | "is" | "smn" | "id" | "ia" | "iu" | "ga" | "it" | "ja" | "Jns" | "jv" | "kea" | "kac" | "xnr" | "krc" | "kaa-cyrl" | "kaa" | "csb" | "kk-cyrl" | "kk-latn" | "klr" | "kha" | "quc" | "ko" | "kfq" | "kpy" | "kos" | "kum" | "ku-arab" | "ku-latn" | "kru" | "ky" | "lkt" | "la" | "lt" | "dsb" | "smj" | "lb" | "bfz" | "ms" | "mt" | "kmj" | "gv" | "mi" | "mr" | "mn" | "cnr-cyrl" | "cnr-latn" | "nap" | "ne" | "niu" | "nog" | "sme" | "nb" | "no" | "oc" | "os" | "ps" | "fa" | "pl" | "pt" | "pa" | "ksh" | "ro" | "rm" | "ru" | "sck" | "sm" | "sa" | "sat" | "sco" | "gd" | "sr" | "sr-Cyrl" | "sr-Latn" | "xsr" | "srx" | "sms" | "sk" | "sl" | "so" | "sma" | "es" | "sw" | "sv" | "tg" | "tt" | "tet" | "thf" | "to" | "tr" | "tk" | "tyv" | "hsb" | "ur" | "ug" | "uz-arab" | "uz-cyrl" | "uz" | "vo" | "wae" | "cy" | "fy" | "yua" | "za" | "zu" | "unk";
3352
4403
 
3353
4404
  /**
@@ -3518,12 +4569,6 @@ export declare interface PIIDetectionSkill extends BaseSearchIndexerSkill {
3518
4569
  domain?: string;
3519
4570
  }
3520
4571
 
3521
- /**
3522
- * Defines values for PIIDetectionSkillMaskingMode.
3523
- * ### Known values supported by the service
3524
- * **none**: No masking occurs and the maskedText output will not be returned.
3525
- * **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText.
3526
- */
3527
4572
  export declare type PIIDetectionSkillMaskingMode = "none" | "replace";
3528
4573
 
3529
4574
  /**
@@ -3567,7 +4612,7 @@ export declare interface QueryAnswerResult {
3567
4612
  */
3568
4613
  export declare type QueryCaption = ExtractiveQueryCaption;
3569
4614
 
3570
- /** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type `semantic`. */
4615
+ /** Captions are the most representative passages from the document relative to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'. */
3571
4616
  export declare interface QueryCaptionResult {
3572
4617
  /** Describes unknown properties. The value of an unknown property can be of "any" type. */
3573
4618
  [property: string]: any;
@@ -3583,28 +4628,169 @@ export declare interface QueryCaptionResult {
3583
4628
  readonly highlights?: string;
3584
4629
  }
3585
4630
 
3586
- /** Defines values for QueryType. */
3587
- export declare type QueryType = "simple" | "full" | "semantic";
4631
+ /**
4632
+ * Defines values for QueryDebugMode. \
4633
+ * {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode,
4634
+ * this enum contains the known values that the service supports.
4635
+ * ### Known values supported by the service
4636
+ * **disabled**: No query debugging information will be returned. \
4637
+ * **semantic**: Allows the user to further explore their reranked results.
4638
+ */
4639
+ export declare type QueryDebugMode = string;
4640
+
4641
+ /**
4642
+ * Defines values for QueryLanguage. \
4643
+ * {@link KnownQueryLanguage} can be used interchangeably with QueryLanguage,
4644
+ * this enum contains the known values that the service supports.
4645
+ * ### Known values supported by the service
4646
+ * **none**: Query language not specified. \
4647
+ * **en-us**: Query language value for English (United States). \
4648
+ * **en-gb**: Query language value for English (Great Britain). \
4649
+ * **en-in**: Query language value for English (India). \
4650
+ * **en-ca**: Query language value for English (Canada). \
4651
+ * **en-au**: Query language value for English (Australia). \
4652
+ * **fr-fr**: Query language value for French (France). \
4653
+ * **fr-ca**: Query language value for French (Canada). \
4654
+ * **de-de**: Query language value for German (Germany). \
4655
+ * **es-es**: Query language value for Spanish (Spain). \
4656
+ * **es-mx**: Query language value for Spanish (Mexico). \
4657
+ * **zh-cn**: Query language value for Chinese (China). \
4658
+ * **zh-tw**: Query language value for Chinese (Taiwan). \
4659
+ * **pt-br**: Query language value for Portuguese (Brazil). \
4660
+ * **pt-pt**: Query language value for Portuguese (Portugal). \
4661
+ * **it-it**: Query language value for Italian (Italy). \
4662
+ * **ja-jp**: Query language value for Japanese (Japan). \
4663
+ * **ko-kr**: Query language value for Korean (Korea). \
4664
+ * **ru-ru**: Query language value for Russian (Russia). \
4665
+ * **cs-cz**: Query language value for Czech (Czech Republic). \
4666
+ * **nl-be**: Query language value for Dutch (Belgium). \
4667
+ * **nl-nl**: Query language value for Dutch (Netherlands). \
4668
+ * **hu-hu**: Query language value for Hungarian (Hungary). \
4669
+ * **pl-pl**: Query language value for Polish (Poland). \
4670
+ * **sv-se**: Query language value for Swedish (Sweden). \
4671
+ * **tr-tr**: Query language value for Turkish (Turkey). \
4672
+ * **hi-in**: Query language value for Hindi (India). \
4673
+ * **ar-sa**: Query language value for Arabic (Saudi Arabia). \
4674
+ * **ar-eg**: Query language value for Arabic (Egypt). \
4675
+ * **ar-ma**: Query language value for Arabic (Morocco). \
4676
+ * **ar-kw**: Query language value for Arabic (Kuwait). \
4677
+ * **ar-jo**: Query language value for Arabic (Jordan). \
4678
+ * **da-dk**: Query language value for Danish (Denmark). \
4679
+ * **no-no**: Query language value for Norwegian (Norway). \
4680
+ * **bg-bg**: Query language value for Bulgarian (Bulgaria). \
4681
+ * **hr-hr**: Query language value for Croatian (Croatia). \
4682
+ * **hr-ba**: Query language value for Croatian (Bosnia and Herzegovina). \
4683
+ * **ms-my**: Query language value for Malay (Malaysia). \
4684
+ * **ms-bn**: Query language value for Malay (Brunei Darussalam). \
4685
+ * **sl-sl**: Query language value for Slovenian (Slovenia). \
4686
+ * **ta-in**: Query language value for Tamil (India). \
4687
+ * **vi-vn**: Query language value for Vietnamese (Viet Nam). \
4688
+ * **el-gr**: Query language value for Greek (Greece). \
4689
+ * **ro-ro**: Query language value for Romanian (Romania). \
4690
+ * **is-is**: Query language value for Icelandic (Iceland). \
4691
+ * **id-id**: Query language value for Indonesian (Indonesia). \
4692
+ * **th-th**: Query language value for Thai (Thailand). \
4693
+ * **lt-lt**: Query language value for Lithuanian (Lithuania). \
4694
+ * **uk-ua**: Query language value for Ukrainian (Ukraine). \
4695
+ * **lv-lv**: Query language value for Latvian (Latvia). \
4696
+ * **et-ee**: Query language value for Estonian (Estonia). \
4697
+ * **ca-es**: Query language value for Catalan. \
4698
+ * **fi-fi**: Query language value for Finnish (Finland). \
4699
+ * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \
4700
+ * **sr-me**: Query language value for Serbian (Montenegro). \
4701
+ * **sr-rs**: Query language value for Serbian (Serbia). \
4702
+ * **sk-sk**: Query language value for Slovak (Slovakia). \
4703
+ * **nb-no**: Query language value for Norwegian (Norway). \
4704
+ * **hy-am**: Query language value for Armenian (Armenia). \
4705
+ * **bn-in**: Query language value for Bengali (India). \
4706
+ * **eu-es**: Query language value for Basque. \
4707
+ * **gl-es**: Query language value for Galician. \
4708
+ * **gu-in**: Query language value for Gujarati (India). \
4709
+ * **he-il**: Query language value for Hebrew (Israel). \
4710
+ * **ga-ie**: Query language value for Irish (Ireland). \
4711
+ * **kn-in**: Query language value for Kannada (India). \
4712
+ * **ml-in**: Query language value for Malayalam (India). \
4713
+ * **mr-in**: Query language value for Marathi (India). \
4714
+ * **fa-ae**: Query language value for Persian (U.A.E.). \
4715
+ * **pa-in**: Query language value for Punjabi (India). \
4716
+ * **te-in**: Query language value for Telugu (India). \
4717
+ * **ur-pk**: Query language value for Urdu (Pakistan).
4718
+ */
4719
+ export declare type QueryLanguage = string;
4720
+
4721
+ /** The raw concatenated strings that were sent to the semantic enrichment process. */
4722
+ export declare interface QueryResultDocumentRerankerInput {
4723
+ /**
4724
+ * The raw string for the title field that was used for semantic enrichment.
4725
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4726
+ */
4727
+ readonly title?: string;
4728
+ /**
4729
+ * The raw concatenated strings for the content fields that were used for semantic enrichment.
4730
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4731
+ */
4732
+ readonly content?: string;
4733
+ /**
4734
+ * The raw concatenated strings for the keyword fields that were used for semantic enrichment.
4735
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4736
+ */
4737
+ readonly keywords?: string;
4738
+ }
4739
+
4740
+ /** Description of fields that were sent to the semantic enrichment process, as well as how they were used */
4741
+ export declare interface QueryResultDocumentSemanticField {
4742
+ /**
4743
+ * The name of the field that was sent to the semantic enrichment process
4744
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4745
+ */
4746
+ readonly name?: string;
4747
+ /**
4748
+ * The way the field was used for the semantic enrichment process (fully used, partially used, or unused)
4749
+ * NOTE: This property will not be serialized. It can only be populated by the server.
4750
+ */
4751
+ readonly state?: SemanticFieldState;
4752
+ }
3588
4753
 
3589
4754
  /**
3590
- * Defines flags for regex pattern matching
4755
+ * Defines values for QuerySpellerType. \
4756
+ * {@link KnownQuerySpellerType} can be used interchangeably with QuerySpellerType,
4757
+ * this enum contains the known values that the service supports.
3591
4758
  * ### Known values supported by the service
3592
- * **CANON_EQ**: Enables canonical equivalence.
3593
- * **CASE_INSENSITIVE**: Enables case-insensitive matching.
3594
- * **COMMENTS**: Permits whitespace and comments in the pattern.
3595
- * **DOTALL**: Enables dotall mode.
3596
- * **LITERAL**: Enables literal parsing of the pattern.
3597
- * **MULTILINE**: Enables multiline mode.
3598
- * **UNICODE_CASE**: Enables Unicode-aware case folding.
3599
- * **UNIX_LINES**: Enables Unix lines mode.
4759
+ * **none**: Speller not enabled. \
4760
+ * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
3600
4761
  */
4762
+ export declare type QuerySpellerType = string;
4763
+
4764
+ /** Defines values for QueryType. */
4765
+ export declare type QueryType = "simple" | "full" | "semantic";
4766
+
3601
4767
  export declare type RegexFlags = "CANON_EQ" | "CASE_INSENSITIVE" | "COMMENTS" | "DOTALL" | "LITERAL" | "MULTILINE" | "UNICODE_CASE" | "UNIX_LINES";
3602
4768
 
4769
+ /**
4770
+ * Options for reset docs operation.
4771
+ */
4772
+ export declare interface ResetDocumentsOptions extends OperationOptions {
4773
+ /** document keys to be reset */
4774
+ documentKeys?: string[];
4775
+ /** datasource document identifiers to be reset */
4776
+ datasourceDocumentIds?: string[];
4777
+ /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */
4778
+ overwrite?: boolean;
4779
+ }
4780
+
3603
4781
  /**
3604
4782
  * Options for reset indexer operation.
3605
4783
  */
3606
4784
  export declare type ResetIndexerOptions = OperationOptions;
3607
4785
 
4786
+ /**
4787
+ * Options for reset skills operation.
4788
+ */
4789
+ export declare interface ResetSkillsOptions extends OperationOptions {
4790
+ /** the names of skills to be reset. */
4791
+ skillNames?: string[];
4792
+ }
4793
+
3608
4794
  /** Represents a resource's usage and quota. */
3609
4795
  export declare interface ResourceCounter {
3610
4796
  /** The resource usage amount. */
@@ -3618,6 +4804,20 @@ export declare interface ResourceCounter {
3618
4804
  */
3619
4805
  export declare type RunIndexerOptions = OperationOptions;
3620
4806
 
4807
+ /** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */
4808
+ export declare interface ScalarQuantizationCompressionConfiguration extends BaseVectorSearchCompressionConfiguration {
4809
+ /** Polymorphic discriminator, which specifies the different types this object can be */
4810
+ kind: "scalarQuantization";
4811
+ /** Contains the parameters specific to Scalar Quantization. */
4812
+ parameters?: ScalarQuantizationParameters;
4813
+ }
4814
+
4815
+ /** Contains the parameters specific to Scalar Quantization. */
4816
+ export declare interface ScalarQuantizationParameters {
4817
+ /** The quantized data type of compressed vector values. */
4818
+ quantizedDataType?: VectorSearchCompressionTargetDataType;
4819
+ }
4820
+
3621
4821
  /**
3622
4822
  * Contains the possible cases for ScoringFunction.
3623
4823
  */
@@ -3656,6 +4856,16 @@ export declare interface ScoringProfile {
3656
4856
  /** Defines values for ScoringStatistics. */
3657
4857
  export declare type ScoringStatistics = "local" | "global";
3658
4858
 
4859
+ /** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */
4860
+ export declare interface SearchAlias {
4861
+ /** The name of the alias. */
4862
+ name: string;
4863
+ /** The name of the index this alias maps to. Only one index name may be specified. */
4864
+ indexes: string[];
4865
+ /** The ETag of the alias. */
4866
+ etag?: string;
4867
+ }
4868
+
3659
4869
  /**
3660
4870
  * Class used to perform operations against a search index,
3661
4871
  * including querying documents in the index as well as
@@ -3679,7 +4889,15 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
3679
4889
  * The name of the index
3680
4890
  */
3681
4891
  readonly indexName: string;
3682
- /* Excluded from this release type: client */
4892
+ /**
4893
+ * @hidden
4894
+ * A reference to the auto-generated SearchClient
4895
+ */
4896
+ private readonly client;
4897
+ /**
4898
+ * A reference to the internal HTTP pipeline for use with raw requests
4899
+ */
4900
+ readonly pipeline: Pipeline;
3683
4901
  /**
3684
4902
  * Creates an instance of SearchClient.
3685
4903
  *
@@ -3886,9 +5104,10 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
3886
5104
  private convertSelect;
3887
5105
  private convertVectorQueryFields;
3888
5106
  private convertSearchFields;
5107
+ private convertSemanticFields;
3889
5108
  private convertOrderBy;
3890
5109
  private convertQueryAnswers;
3891
- private convertCaptions;
5110
+ private convertQueryCaptions;
3892
5111
  private convertVectorQuery;
3893
5112
  }
3894
5113
 
@@ -4001,12 +5220,13 @@ export declare type SearchFieldArray<TModel extends object = object> = (<T>() =>
4001
5220
  * Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean',
4002
5221
  * 'Edm.DateTimeOffset', 'Edm.GeographyPoint', 'Collection(Edm.String)', 'Collection(Edm.Int32)',
4003
5222
  * 'Collection(Edm.Int64)', 'Collection(Edm.Double)', 'Collection(Edm.Boolean)',
4004
- * 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)', 'Collection(Edm.Single)'
5223
+ * 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)', 'Collection(Edm.Single)',
5224
+ * 'Collection(Edm.Half)', 'Collection(Edm.Int16)', 'Collection(Edm.SByte)'
4005
5225
  *
4006
5226
  * NB: `Edm.Single` alone is not a valid data type. It must be used as part of a collection type.
4007
5227
  * @readonly
4008
5228
  */
4009
- export declare type SearchFieldDataType = "Edm.String" | "Edm.Int32" | "Edm.Int64" | "Edm.Double" | "Edm.Boolean" | "Edm.DateTimeOffset" | "Edm.GeographyPoint" | "Collection(Edm.String)" | "Collection(Edm.Int32)" | "Collection(Edm.Int64)" | "Collection(Edm.Double)" | "Collection(Edm.Boolean)" | "Collection(Edm.DateTimeOffset)" | "Collection(Edm.GeographyPoint)" | "Collection(Edm.Single)";
5229
+ export declare type SearchFieldDataType = "Edm.String" | "Edm.Int32" | "Edm.Int64" | "Edm.Double" | "Edm.Boolean" | "Edm.DateTimeOffset" | "Edm.GeographyPoint" | "Collection(Edm.String)" | "Collection(Edm.Int32)" | "Collection(Edm.Int64)" | "Collection(Edm.Double)" | "Collection(Edm.Boolean)" | "Collection(Edm.DateTimeOffset)" | "Collection(Edm.GeographyPoint)" | "Collection(Edm.Single)" | "Collection(Edm.Half)" | "Collection(Edm.Int16)" | "Collection(Edm.SByte)";
4010
5230
 
4011
5231
  /**
4012
5232
  * Represents a search index definition, which describes the fields and search behavior of an
@@ -4055,6 +5275,10 @@ export declare interface SearchIndex {
4055
5275
  * The character filters for the index.
4056
5276
  */
4057
5277
  charFilters?: CharFilter[];
5278
+ /**
5279
+ * The normalizers for the index.
5280
+ */
5281
+ normalizers?: LexicalNormalizer[];
4058
5282
  /**
4059
5283
  * A description of an encryption key that you create in Azure Key Vault. This key is used to
4060
5284
  * provide an additional level of encryption-at-rest for your data when you want full assurance
@@ -4086,6 +5310,11 @@ export declare interface SearchIndex {
4086
5310
  etag?: string;
4087
5311
  }
4088
5312
 
5313
+ /**
5314
+ * Search Alias object.
5315
+ */
5316
+ export declare type SearchIndexAlias = SearchAlias;
5317
+
4089
5318
  /**
4090
5319
  * Class to perform operations to manage
4091
5320
  * (create, update, list/delete)
@@ -4105,7 +5334,15 @@ export declare class SearchIndexClient {
4105
5334
  * The endpoint of the search service
4106
5335
  */
4107
5336
  readonly endpoint: string;
4108
- /* Excluded from this release type: client */
5337
+ /**
5338
+ * @hidden
5339
+ * A reference to the auto-generated SearchServiceClient
5340
+ */
5341
+ private readonly client;
5342
+ /**
5343
+ * A reference to the internal HTTP pipeline for use with raw requests
5344
+ */
5345
+ readonly pipeline: Pipeline;
4109
5346
  /**
4110
5347
  * Used to authenticate requests to the service.
4111
5348
  */
@@ -4138,6 +5375,13 @@ export declare class SearchIndexClient {
4138
5375
  * @param options - Options to the list index operation.
4139
5376
  */
4140
5377
  listIndexes(options?: ListIndexesOptions): IndexIterator;
5378
+ private listAliasesPage;
5379
+ private listAliasesAll;
5380
+ /**
5381
+ * Lists all aliases available for a search service.
5382
+ * @param options - The options parameters.
5383
+ */
5384
+ listAliases(options?: ListAliasesOptions): AliasIterator;
4141
5385
  private listIndexesNamesPage;
4142
5386
  private listIndexesNamesAll;
4143
5387
  /**
@@ -4203,6 +5447,31 @@ export declare class SearchIndexClient {
4203
5447
  * @param options - Additional optional arguments.
4204
5448
  */
4205
5449
  deleteSynonymMap(synonymMap: string | SynonymMap, options?: DeleteSynonymMapOptions): Promise<void>;
5450
+ /**
5451
+ * Creates a new search alias or updates an alias if it already exists.
5452
+ * @param alias - The definition of the alias to create or update.
5453
+ * @param options - The options parameters.
5454
+ */
5455
+ createOrUpdateAlias(alias: SearchIndexAlias, options?: CreateOrUpdateAliasOptions): Promise<SearchIndexAlias>;
5456
+ /**
5457
+ * Creates a new search alias.
5458
+ * @param alias - The definition of the alias to create.
5459
+ * @param options - The options parameters.
5460
+ */
5461
+ createAlias(alias: SearchIndexAlias, options?: CreateAliasOptions): Promise<SearchIndexAlias>;
5462
+ /**
5463
+ * Deletes a search alias and its associated mapping to an index. This operation is permanent, with no
5464
+ * recovery option. The mapped index is untouched by this operation.
5465
+ * @param alias - Alias/Name name of the alias to delete.
5466
+ * @param options - The options parameters.
5467
+ */
5468
+ deleteAlias(alias: string | SearchIndexAlias, options?: DeleteAliasOptions): Promise<void>;
5469
+ /**
5470
+ * Retrieves an alias definition.
5471
+ * @param aliasName - The name of the alias to retrieve.
5472
+ * @param options - The options parameters.
5473
+ */
5474
+ getAlias(aliasName: string, options?: GetAliasOptions): Promise<SearchIndexAlias>;
4206
5475
  /**
4207
5476
  * Retrieves statistics about an index, such as the count of documents and the size
4208
5477
  * of index storage.
@@ -4316,6 +5585,29 @@ export declare interface SearchIndexer {
4316
5585
  * paid services created on or after January 1, 2019.
4317
5586
  */
4318
5587
  encryptionKey?: SearchResourceEncryptionKey;
5588
+ /**
5589
+ * Adds caching to an enrichment pipeline to allow for incremental modification steps without
5590
+ * having to rebuild the index every time.
5591
+ */
5592
+ cache?: SearchIndexerCache;
5593
+ }
5594
+
5595
+ export declare interface SearchIndexerCache {
5596
+ /**
5597
+ * The connection string to the storage account where the cache data will be persisted.
5598
+ */
5599
+ storageConnectionString?: string;
5600
+ /**
5601
+ * Specifies whether incremental reprocessing is enabled.
5602
+ */
5603
+ enableReprocessing?: boolean;
5604
+ /** The user-assigned managed identity used for connections to the enrichment cache. If the
5605
+ * connection string indicates an identity (ResourceId) and it's not specified, the
5606
+ * system-assigned managed identity is used. On updates to the indexer, if the identity is
5607
+ * unspecified, the value remains unchanged. If set to "none", the value of this property is
5608
+ * cleared.
5609
+ */
5610
+ identity?: SearchIndexerDataIdentity;
4319
5611
  }
4320
5612
 
4321
5613
  /**
@@ -4337,7 +5629,15 @@ export declare class SearchIndexerClient {
4337
5629
  * The endpoint of the search service
4338
5630
  */
4339
5631
  readonly endpoint: string;
4340
- /* Excluded from this release type: client */
5632
+ /**
5633
+ * @hidden
5634
+ * A reference to the auto-generated SearchServiceClient
5635
+ */
5636
+ private readonly client;
5637
+ /**
5638
+ * A reference to the internal HTTP pipeline for use with raw requests
5639
+ */
5640
+ readonly pipeline: Pipeline;
4341
5641
  /**
4342
5642
  * Creates an instance of SearchIndexerClient.
4343
5643
  *
@@ -4475,6 +5775,19 @@ export declare class SearchIndexerClient {
4475
5775
  * @param options - Additional optional arguments.
4476
5776
  */
4477
5777
  runIndexer(indexerName: string, options?: RunIndexerOptions): Promise<void>;
5778
+ /**
5779
+ * Resets specific documents in the datasource to be selectively re-ingested by the indexer.
5780
+ * @param indexerName - The name of the indexer to reset documents for.
5781
+ * @param options - Additional optional arguments.
5782
+ */
5783
+ resetDocuments(indexerName: string, options?: ResetDocumentsOptions): Promise<void>;
5784
+ /**
5785
+ * Reset an existing skillset in a search service.
5786
+ * @param skillsetName - The name of the skillset to reset.
5787
+ * @param skillNames - The names of skills to reset.
5788
+ * @param options - The options parameters.
5789
+ */
5790
+ resetSkills(skillsetName: string, options?: ResetSkillsOptions): Promise<void>;
4478
5791
  }
4479
5792
 
4480
5793
  /**
@@ -4506,6 +5819,17 @@ export declare interface SearchIndexerDataContainer {
4506
5819
  query?: string;
4507
5820
  }
4508
5821
 
5822
+ /**
5823
+ * Contains the possible cases for SearchIndexerDataIdentity.
5824
+ */
5825
+ export declare type SearchIndexerDataIdentity = SearchIndexerDataNoneIdentity | SearchIndexerDataUserAssignedIdentity;
5826
+
5827
+ /** Clears the identity property of a datasource. */
5828
+ export declare interface SearchIndexerDataNoneIdentity extends BaseSearchIndexerDataIdentity {
5829
+ /** Polymorphic discriminator, which specifies the different types this object can be */
5830
+ odatatype: "#Microsoft.Azure.Search.DataNoneIdentity";
5831
+ }
5832
+
4509
5833
  /**
4510
5834
  * Represents a datasource definition, which can be used to configure an indexer.
4511
5835
  */
@@ -4531,6 +5855,12 @@ export declare interface SearchIndexerDataSourceConnection {
4531
5855
  * The data container for the datasource.
4532
5856
  */
4533
5857
  container: SearchIndexerDataContainer;
5858
+ /**
5859
+ * An explicit managed identity to use for this datasource. If not specified and the connection
5860
+ * string is a managed identity, the system-assigned managed identity is used. If not specified,
5861
+ * the value remains unchanged. If "none" is specified, the value of this property is cleared.
5862
+ */
5863
+ identity?: SearchIndexerDataIdentity;
4534
5864
  /**
4535
5865
  * The data change detection policy for the datasource.
4536
5866
  */
@@ -4559,6 +5889,14 @@ export declare interface SearchIndexerDataSourceConnection {
4559
5889
 
4560
5890
  export declare type SearchIndexerDataSourceType = "azuresql" | "cosmosdb" | "azureblob" | "azuretable" | "mysql" | "adlsgen2";
4561
5891
 
5892
+ /** Specifies the identity for a datasource to use. */
5893
+ export declare interface SearchIndexerDataUserAssignedIdentity extends BaseSearchIndexerDataIdentity {
5894
+ /** Polymorphic discriminator, which specifies the different types this object can be */
5895
+ odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity";
5896
+ /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */
5897
+ userAssignedIdentity: string;
5898
+ }
5899
+
4562
5900
  /** Represents an item- or document-level indexing error. */
4563
5901
  export declare interface SearchIndexerError {
4564
5902
  /**
@@ -4593,6 +5931,34 @@ export declare interface SearchIndexerError {
4593
5931
  readonly documentationLink?: string;
4594
5932
  }
4595
5933
 
5934
+ /** Definition of additional projections to secondary search indexes. */
5935
+ export declare interface SearchIndexerIndexProjections {
5936
+ /** A list of projections to be performed to secondary search indexes. */
5937
+ selectors: SearchIndexerIndexProjectionSelector[];
5938
+ /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
5939
+ parameters?: SearchIndexerIndexProjectionsParameters;
5940
+ }
5941
+
5942
+ /** Description for what data to store in the designated search index. */
5943
+ export declare interface SearchIndexerIndexProjectionSelector {
5944
+ /** Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. */
5945
+ targetIndexName: string;
5946
+ /** Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. */
5947
+ parentKeyFieldName: string;
5948
+ /** Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. */
5949
+ sourceContext: string;
5950
+ /** Mappings for the projection, or which source should be mapped to which field in the target index. */
5951
+ mappings: InputFieldMappingEntry[];
5952
+ }
5953
+
5954
+ /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
5955
+ export declare interface SearchIndexerIndexProjectionsParameters {
5956
+ /** Describes unknown properties.*/
5957
+ [property: string]: unknown;
5958
+ /** Defines behavior of the index projections in relation to the rest of the indexer. */
5959
+ projectionMode?: IndexProjectionMode;
5960
+ }
5961
+
4596
5962
  /**
4597
5963
  * Definition of additional projections to azure blob, table, or files, of enriched data.
4598
5964
  */
@@ -4605,6 +5971,14 @@ export declare interface SearchIndexerKnowledgeStore {
4605
5971
  * A list of additional projections to perform during indexing.
4606
5972
  */
4607
5973
  projections: SearchIndexerKnowledgeStoreProjection[];
5974
+ /**
5975
+ * The user-assigned managed identity used for connections to Azure Storage when writing
5976
+ * knowledge store projections. If the connection string indicates an identity (ResourceId) and
5977
+ * it's not specified, the system-assigned managed identity is used. On updates to the indexer,
5978
+ * if the identity is unspecified, the value remains unchanged. If set to "none", the value of
5979
+ * this property is cleared.
5980
+ */
5981
+ identity?: SearchIndexerDataIdentity;
4608
5982
  }
4609
5983
 
4610
5984
  /** Abstract class to share properties between concrete selectors. */
@@ -4621,6 +5995,14 @@ export declare interface SearchIndexerKnowledgeStoreFileProjectionSelector exten
4621
5995
  export declare interface SearchIndexerKnowledgeStoreObjectProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {
4622
5996
  }
4623
5997
 
5998
+ /** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
5999
+ export declare interface SearchIndexerKnowledgeStoreParameters {
6000
+ /** Describes unknown properties. The value of an unknown property can be of "any" type. */
6001
+ [property: string]: unknown;
6002
+ /** Whether or not projections should synthesize a generated key name if one isn't already present. */
6003
+ synthesizeGeneratedKeyName?: boolean;
6004
+ }
6005
+
4624
6006
  /** Container object for various projection selectors. */
4625
6007
  export declare interface SearchIndexerKnowledgeStoreProjection {
4626
6008
  /** Projections to Azure Table storage. */
@@ -4672,7 +6054,7 @@ export declare interface SearchIndexerLimits {
4672
6054
  /**
4673
6055
  * Contains the possible cases for Skill.
4674
6056
  */
4675
- export declare type SearchIndexerSkill = ConditionalSkill | KeyPhraseExtractionSkill | OcrSkill | ImageAnalysisSkill | LanguageDetectionSkill | ShaperSkill | MergeSkill | EntityRecognitionSkill | SentimentSkill | SentimentSkillV3 | EntityLinkingSkill | EntityRecognitionSkillV3 | PIIDetectionSkill | SplitSkill | CustomEntityLookupSkill | TextTranslationSkill | DocumentExtractionSkill | WebApiSkill;
6057
+ export declare type SearchIndexerSkill = AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill;
4676
6058
 
4677
6059
  /**
4678
6060
  * A list of skills.
@@ -4698,6 +6080,10 @@ export declare interface SearchIndexerSkillset {
4698
6080
  * Definition of additional projections to azure blob, table, or files, of enriched data.
4699
6081
  */
4700
6082
  knowledgeStore?: SearchIndexerKnowledgeStore;
6083
+ /**
6084
+ * Definition of additional projections to secondary search index(es).
6085
+ */
6086
+ indexProjections?: SearchIndexerIndexProjections;
4701
6087
  /**
4702
6088
  * The ETag of the skillset.
4703
6089
  */
@@ -5086,6 +6472,13 @@ export declare interface SearchResourceEncryptionKey {
5086
6472
  * The authentication key of the specified AAD application.
5087
6473
  */
5088
6474
  applicationSecret?: string;
6475
+ /**
6476
+ * An explicit managed identity to use for this encryption key. If not specified and the access
6477
+ * credentials property is null, the system-assigned managed identity is used. On update to the
6478
+ * resource, if the explicit identity is unspecified, it remains unchanged. If "none" is specified,
6479
+ * the value of this property is cleared.
6480
+ */
6481
+ identity?: SearchIndexerDataIdentity;
5089
6482
  }
5090
6483
 
5091
6484
  /**
@@ -5116,6 +6509,11 @@ export declare type SearchResult<TModel extends object, TFields extends SelectFi
5116
6509
  */
5117
6510
  readonly captions?: QueryCaptionResult[];
5118
6511
  document: NarrowedModel<TModel, TFields>;
6512
+ /**
6513
+ * Contains debugging information that can be used to further explore your search results.
6514
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6515
+ */
6516
+ readonly documentDebugInfo?: DocumentDebugInfo[];
5119
6517
  };
5120
6518
 
5121
6519
  /**
@@ -5166,22 +6564,33 @@ export declare interface SemanticConfiguration {
5166
6564
  }
5167
6565
 
5168
6566
  /**
5169
- * partial: If the semantic processing fails, partial results still return. The definition of
5170
- * partial results depends on what semantic step failed and what was the reason for failure.
5171
- *
5172
- * fail: If there is an exception during the semantic processing step, the query will fail and
5173
- * return the appropriate HTTP code depending on the error.
6567
+ * Debug options for semantic search queries.
5174
6568
  */
6569
+ export declare interface SemanticDebugInfo {
6570
+ /**
6571
+ * The title field that was sent to the semantic enrichment process, as well as how it was used
6572
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6573
+ */
6574
+ readonly titleField?: QueryResultDocumentSemanticField;
6575
+ /**
6576
+ * The content fields that were sent to the semantic enrichment process, as well as how they were used
6577
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6578
+ */
6579
+ readonly contentFields?: QueryResultDocumentSemanticField[];
6580
+ /**
6581
+ * The keyword fields that were sent to the semantic enrichment process, as well as how they were used
6582
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6583
+ */
6584
+ readonly keywordFields?: QueryResultDocumentSemanticField[];
6585
+ /**
6586
+ * The raw concatenated strings that were sent to the semantic enrichment process.
6587
+ * NOTE: This property will not be serialized. It can only be populated by the server.
6588
+ */
6589
+ readonly rerankerInput?: QueryResultDocumentRerankerInput;
6590
+ }
6591
+
5175
6592
  export declare type SemanticErrorMode = "partial" | "fail";
5176
6593
 
5177
- /**
5178
- * maxWaitExceeded: If 'semanticMaxWaitInMilliseconds' was set and the semantic processing duration
5179
- * exceeded that value. Only the base results were returned.
5180
- *
5181
- * capacityOverloaded: The request was throttled. Only the base results were returned.
5182
- *
5183
- * transient: At least one step of the semantic process failed.
5184
- */
5185
6594
  export declare type SemanticErrorReason = "maxWaitExceeded" | "capacityOverloaded" | "transient";
5186
6595
 
5187
6596
  /** A field that is used as part of the semantic configuration. */
@@ -5189,6 +6598,17 @@ export declare interface SemanticField {
5189
6598
  name: string;
5190
6599
  }
5191
6600
 
6601
+ /**
6602
+ * Defines values for SemanticFieldState. \
6603
+ * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState,
6604
+ * this enum contains the known values that the service supports.
6605
+ * ### Known values supported by the service
6606
+ * **used**: The field was fully used for semantic enrichment. \
6607
+ * **unused**: The field was not used for semantic enrichment. \
6608
+ * **partial**: The field was partially used for semantic enrichment.
6609
+ */
6610
+ export declare type SemanticFieldState = string;
6611
+
5192
6612
  /** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
5193
6613
  export declare interface SemanticPrioritizedFields {
5194
6614
  /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
@@ -5236,14 +6656,22 @@ export declare interface SemanticSearchOptions {
5236
6656
  * to 'None'.
5237
6657
  */
5238
6658
  captions?: QueryCaption;
6659
+ /**
6660
+ * Allows setting a separate search query that will be solely used for semantic reranking,
6661
+ * semantic captions and semantic answers. Is useful for scenarios where there is a need to use
6662
+ * different queries between the base retrieval and ranking phase, and the L2 semantic phase.
6663
+ */
6664
+ semanticQuery?: string;
6665
+ /**
6666
+ * The list of field names used for semantic search.
6667
+ */
6668
+ semanticFields?: string[];
6669
+ /**
6670
+ * Enables a debugging tool that can be used to further explore your search results.
6671
+ */
6672
+ debugMode?: QueryDebugMode;
5239
6673
  }
5240
6674
 
5241
- /**
5242
- * baseResults: Results without any semantic enrichment or reranking.
5243
- *
5244
- * rerankedResults: Results have been reranked with the reranker model and will include semantic
5245
- * captions. They will not include any answers, answers highlights or caption highlights.
5246
- */
5247
6675
  export declare type SemanticSearchResultsType = "baseResults" | "rerankedResults";
5248
6676
 
5249
6677
  /**
@@ -5258,10 +6686,6 @@ export declare interface SentimentSkill extends BaseSearchIndexerSkill {
5258
6686
  defaultLanguageCode?: SentimentSkillLanguage;
5259
6687
  }
5260
6688
 
5261
- /**
5262
- * Defines supported languages for {@link SentimentSkill}
5263
- * {@link KnownSentimentSkillLanguage} can be used interchangeably with this type
5264
- */
5265
6689
  export declare type SentimentSkillLanguage = "da" | "nl" | "en" | "fi" | "fr" | "de" | "el" | "it" | "no" | "pl" | "pt-PT" | "ru" | "es" | "sv" | "tr";
5266
6690
 
5267
6691
  /** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at a sentence and document-level. */
@@ -5278,6 +6702,8 @@ export declare interface SentimentSkillV3 extends BaseSearchIndexerSkill {
5278
6702
 
5279
6703
  /** Represents service-level resource counters and quotas. */
5280
6704
  export declare interface ServiceCounters {
6705
+ /** Total number of aliases. */
6706
+ aliasCounter: ResourceCounter;
5281
6707
  /** Total number of documents across all indexes in the service. */
5282
6708
  documentCounter: ResourceCounter;
5283
6709
  /** Total number of indexes. */
@@ -5291,7 +6717,7 @@ export declare interface ServiceCounters {
5291
6717
  /** Total number of synonym maps. */
5292
6718
  synonymMapCounter: ResourceCounter;
5293
6719
  /** Total number of skillsets. */
5294
- skillsetCounter?: ResourceCounter;
6720
+ skillsetCounter: ResourceCounter;
5295
6721
  /** Total memory consumption of all vector indexes within the service, in bytes. */
5296
6722
  vectorIndexSizeCounter: ResourceCounter;
5297
6723
  }
@@ -5369,14 +6795,25 @@ export declare interface SimpleField {
5369
6795
  */
5370
6796
  key?: boolean;
5371
6797
  /**
5372
- * A value indicating whether the field can be returned in a search result. You can enable this
6798
+ * A value indicating whether the field can be returned in a search result. You can disable this
5373
6799
  * option if you want to use a field (for example, margin) as a filter, sorting, or scoring
5374
- * mechanism but do not want the field to be visible to the end user. This property must be false
5375
- * for key fields. This property can be changed on existing fields.
5376
- * Disabling this property does not cause any increase in index storage requirements.
5377
- * Default is false.
6800
+ * mechanism but do not want the field to be visible to the end user. This property must be true
6801
+ * for key fields. This property can be changed on existing fields. Enabling this property does
6802
+ * not cause any increase in index storage requirements. Default is true for simple fields and
6803
+ * false for vector fields.
5378
6804
  */
5379
6805
  hidden?: boolean;
6806
+ /**
6807
+ * An immutable value indicating whether the field will be persisted separately on disk to be
6808
+ * returned in a search result. You can disable this option if you don't plan to return the field
6809
+ * contents in a search response to save on storage overhead. This can only be set during index
6810
+ * creation and only for vector fields. This property cannot be changed for existing fields or set
6811
+ * as false for new fields. If this property is set as false, the property `hidden` must be set as
6812
+ * true. This property must be true or unset for key fields, for new fields, and for non-vector
6813
+ * fields, and it must be null for complex fields. Disabling this property will reduce index
6814
+ * storage requirements. The default is true for vector fields.
6815
+ */
6816
+ stored?: boolean;
5380
6817
  /**
5381
6818
  * A value indicating whether the field is full-text searchable. This means it will undergo
5382
6819
  * analysis such as word-breaking during indexing. If you set a searchable field to a value like
@@ -5419,21 +6856,21 @@ export declare interface SimpleField {
5419
6856
  * The name of the language analyzer to use for the field. This option can be used only with
5420
6857
  * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.
5421
6858
  * Once the analyzer is chosen, it cannot be changed for the field.
5422
- * {@link KnownAnalyzerNames} is an enum containing built-in analyzer names.
6859
+ * KnownAnalyzerNames is an enum containing known values.
5423
6860
  */
5424
6861
  analyzerName?: LexicalAnalyzerName;
5425
6862
  /**
5426
6863
  * The name of the analyzer used at search time for the field. This option can be used only with
5427
6864
  * searchable fields. It must be set together with indexAnalyzer and it cannot be set together
5428
6865
  * with the analyzer option. This analyzer can be updated on an existing field.
5429
- * {@link KnownAnalyzerNames} is an enum containing built-in analyzer names.
6866
+ * KnownAnalyzerNames is an enum containing known values.
5430
6867
  */
5431
6868
  searchAnalyzerName?: LexicalAnalyzerName;
5432
6869
  /**
5433
6870
  * The name of the analyzer used at indexing time for the field. This option can be used only
5434
6871
  * with searchable fields. It must be set together with searchAnalyzer and it cannot be set
5435
6872
  * together with the analyzer option. Once the analyzer is chosen, it cannot be changed for the
5436
- * field. {@link KnownAnalyzerNames} is an enum containing built-in analyzer names.
6873
+ * field. KnownAnalyzerNames is an enum containing known values.
5437
6874
  */
5438
6875
  indexAnalyzerName?: LexicalAnalyzerName;
5439
6876
  /**
@@ -5444,6 +6881,10 @@ export declare interface SimpleField {
5444
6881
  * fields.
5445
6882
  */
5446
6883
  synonymMapNames?: string[];
6884
+ /**
6885
+ * The name of the normalizer used at indexing time for the field.
6886
+ */
6887
+ normalizerName?: LexicalNormalizerName;
5447
6888
  /**
5448
6889
  * The dimensionality of the vector field.
5449
6890
  */
@@ -5476,6 +6917,16 @@ export declare interface SoftDeleteColumnDeletionDetectionPolicy extends BaseDat
5476
6917
  softDeleteMarkerValue?: string;
5477
6918
  }
5478
6919
 
6920
+ /**
6921
+ * Defines values for Speller. \
6922
+ * {@link KnownSpeller} can be used interchangeably with Speller,
6923
+ * this enum contains the known values that the service supports.
6924
+ * ### Known values supported by the service
6925
+ * **none**: Speller not enabled. \
6926
+ * **lexicon**: Speller corrects individual query terms using a static lexicon for the language specified by the queryLanguage parameter.
6927
+ */
6928
+ export declare type Speller = string;
6929
+
5479
6930
  /** A skill to split a string into chunks of text. */
5480
6931
  export declare interface SplitSkill extends BaseSearchIndexerSkill {
5481
6932
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -5488,10 +6939,6 @@ export declare interface SplitSkill extends BaseSearchIndexerSkill {
5488
6939
  maxPageLength?: number;
5489
6940
  }
5490
6941
 
5491
- /**
5492
- * Defines supported languages for {@link SplitSkill}
5493
- * {@link KnownSplitSkillLanguage} can be used interchangeably with this type
5494
- */
5495
6942
  export declare type SplitSkillLanguage = "am" | "bs" | "cs" | "da" | "de" | "en" | "es" | "et" | "fi" | "fr" | "he" | "hi" | "hr" | "hu" | "id" | "is" | "it" | "ja" | "ko" | "lv" | "nb" | "nl" | "pl" | "pt" | "pt-br" | "ru" | "sk" | "sl" | "sr" | "sv" | "tr" | "ur" | "zh";
5496
6943
 
5497
6944
  /** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
@@ -5709,11 +7156,7 @@ export declare interface TextTranslationSkill extends BaseSearchIndexerSkill {
5709
7156
  suggestedFrom?: TextTranslationSkillLanguage;
5710
7157
  }
5711
7158
 
5712
- /**
5713
- * Defines supported languages for {@link TextTranslationSkill}
5714
- * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with this type
5715
- */
5716
- export declare type TextTranslationSkillLanguage = "af" | "ar" | "bn" | "bs" | "bg" | "yue" | "ca" | "zh-Hans" | "zh-Hant" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fj" | "fil" | "fi" | "fr" | "de" | "el" | "ht" | "he" | "hi" | "mww" | "hu" | "is" | "id" | "it" | "ja" | "sw" | "tlh" | "ko" | "lv" | "lt" | "mg" | "ms" | "mt" | "nb" | "fa" | "pl" | "pt" | "otq" | "ro" | "ru" | "sm" | "sr-Cyrl" | "sr-Latn" | "sk" | "sl" | "es" | "sv" | "ty" | "ta" | "te" | "th" | "to" | "tr" | "uk" | "ur" | "vi" | "cy" | "yua";
7159
+ export declare type TextTranslationSkillLanguage = "af" | "ar" | "bn" | "bs" | "bg" | "yue" | "ca" | "zh-Hans" | "zh-Hant" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fj" | "fil" | "fi" | "fr" | "de" | "el" | "ht" | "he" | "hi" | "mww" | "hu" | "is" | "id" | "it" | "ja" | "sw" | "tlh" | "tlh-Latn" | "tlh-Piqd" | "ko" | "lv" | "lt" | "mg" | "ms" | "mt" | "nb" | "fa" | "pl" | "pt" | "pt-br" | "pt-PT" | "otq" | "ro" | "ru" | "sm" | "sr-Cyrl" | "sr-Latn" | "sk" | "sl" | "es" | "sv" | "ty" | "ta" | "te" | "th" | "to" | "tr" | "uk" | "ur" | "vi" | "cy" | "yua" | "ga" | "kn" | "mi" | "ml" | "pa";
5717
7160
 
5718
7161
  /** Defines weights on index fields for which matches should boost scoring in search queries. */
5719
7162
  export declare interface TextWeights {
@@ -5752,7 +7195,7 @@ export declare type TokenFilter = AsciiFoldingTokenFilter | CjkBigramTokenFilter
5752
7195
  * **kstem**: A high-performance kstem filter for English. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html \
5753
7196
  * **length**: Removes words that are too long or too short. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html \
5754
7197
  * **limit**: Limits the number of tokens while indexing. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html \
5755
- * **lowercase**: Normalizes token text to lower case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.htm \
7198
+ * **lowercase**: Normalizes token text to lower case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html \
5756
7199
  * **nGram_v2**: Generates n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html \
5757
7200
  * **persian_normalization**: Applies normalization for Persian. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html \
5758
7201
  * **phonetic**: Create tokens for phonetic matches. See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html \
@@ -5768,7 +7211,7 @@ export declare type TokenFilter = AsciiFoldingTokenFilter | CjkBigramTokenFilter
5768
7211
  * **trim**: Trims leading and trailing whitespace from tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html \
5769
7212
  * **truncate**: Truncates the terms to a specific length. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html \
5770
7213
  * **unique**: Filters out tokens with same text as the previous token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html \
5771
- * **uppercase**: Normalizes token text to upper case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \
7214
+ * **uppercase**: Normalizes token text to upper case. See https:\/\/lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html \
5772
7215
  * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups.
5773
7216
  */
5774
7217
  export declare type TokenFilterName = string;
@@ -5804,11 +7247,16 @@ export declare interface UniqueTokenFilter extends BaseTokenFilter {
5804
7247
  */
5805
7248
  export declare type UploadDocumentsOptions = IndexDocumentsOptions;
5806
7249
 
5807
- /**
5808
- * Determines whether or not filters are applied before or after the vector search is performed.
5809
- */
5810
7250
  export declare type VectorFilterMode = "postFilter" | "preFilter";
5811
7251
 
7252
+ /** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
7253
+ export declare interface VectorizableTextQuery<TModel extends object> extends BaseVectorQuery<TModel> {
7254
+ /** Polymorphic discriminator, which specifies the different types this object can be */
7255
+ kind: "text";
7256
+ /** The text to be vectorized to perform a vector search query. */
7257
+ text?: string;
7258
+ }
7259
+
5812
7260
  /** The query parameters to use for vector search when a raw vector value is provided. */
5813
7261
  export declare interface VectorizedQuery<TModel extends object> extends BaseVectorQuery<TModel> {
5814
7262
  /** Polymorphic discriminator, which specifies the different types this object can be */
@@ -5818,9 +7266,9 @@ export declare interface VectorizedQuery<TModel extends object> extends BaseVect
5818
7266
  }
5819
7267
 
5820
7268
  /** The query parameters for vector and hybrid search queries. */
5821
- export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel>;
7269
+ export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel>;
5822
7270
 
5823
- export declare type VectorQueryKind = "vector";
7271
+ export declare type VectorQueryKind = "vector" | "text";
5824
7272
 
5825
7273
  /** Contains configuration options related to vector search. */
5826
7274
  export declare interface VectorSearch {
@@ -5828,6 +7276,13 @@ export declare interface VectorSearch {
5828
7276
  profiles?: VectorSearchProfile[];
5829
7277
  /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
5830
7278
  algorithms?: VectorSearchAlgorithmConfiguration[];
7279
+ /** Contains configuration options on how to vectorize text vector queries. */
7280
+ vectorizers?: VectorSearchVectorizer[];
7281
+ /**
7282
+ * Contains configuration options specific to the compression method used during indexing or
7283
+ * querying.
7284
+ */
7285
+ compressions?: VectorSearchCompressionConfiguration[];
5831
7286
  }
5832
7287
 
5833
7288
  /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
@@ -5835,9 +7290,32 @@ export declare type VectorSearchAlgorithmConfiguration = HnswAlgorithmConfigurat
5835
7290
 
5836
7291
  export declare type VectorSearchAlgorithmKind = "hnsw" | "exhaustiveKnn";
5837
7292
 
5838
- /** The similarity metric to use for vector comparisons. */
5839
7293
  export declare type VectorSearchAlgorithmMetric = "cosine" | "euclidean" | "dotProduct";
5840
7294
 
7295
+ /**
7296
+ * Contains configuration options specific to the compression method used during indexing or
7297
+ * querying.
7298
+ */
7299
+ export declare type VectorSearchCompressionConfiguration = ScalarQuantizationCompressionConfiguration;
7300
+
7301
+ /**
7302
+ * Defines values for VectorSearchCompressionKind. \
7303
+ * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind,
7304
+ * this enum contains the known values that the service supports.
7305
+ * ### Known values supported by the service
7306
+ * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size.
7307
+ */
7308
+ export declare type VectorSearchCompressionKind = string;
7309
+
7310
+ /**
7311
+ * Defines values for VectorSearchCompressionTargetDataType. \
7312
+ * {@link KnownVectorSearchCompressionTargetDataType} can be used interchangeably with VectorSearchCompressionTargetDataType,
7313
+ * this enum contains the known values that the service supports.
7314
+ * ### Known values supported by the service
7315
+ * **int8**
7316
+ */
7317
+ export declare type VectorSearchCompressionTargetDataType = string;
7318
+
5841
7319
  /**
5842
7320
  * Defines options for vector search queries
5843
7321
  */
@@ -5859,8 +7337,17 @@ export declare interface VectorSearchProfile {
5859
7337
  name: string;
5860
7338
  /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */
5861
7339
  algorithmConfigurationName: string;
7340
+ /** The name of the kind of vectorization method being configured for use with vector search. */
7341
+ vectorizer?: string;
7342
+ /** The name of the compression method configuration that specifies the compression method and optional parameters. */
7343
+ compressionConfigurationName?: string;
5862
7344
  }
5863
7345
 
7346
+ /** Contains configuration options on how to vectorize text vector queries. */
7347
+ export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | CustomVectorizer;
7348
+
7349
+ export declare type VectorSearchVectorizerKind = "azureOpenAI" | "customWebApi";
7350
+
5864
7351
  export declare type VisualFeature = "adult" | "brands" | "categories" | "description" | "faces" | "objects" | "tags";
5865
7352
 
5866
7353
  /**
@@ -5898,6 +7385,22 @@ export declare interface WebApiSkill extends BaseSearchIndexerSkill {
5898
7385
  * If set, the number of parallel calls that can be made to the Web API.
5899
7386
  */
5900
7387
  degreeOfParallelism?: number;
7388
+ /**
7389
+ * Applies to custom skills that connect to external code in an Azure function or some other
7390
+ * application that provides the transformations. This value should be the application ID
7391
+ * created for the function or app when it was registered with Azure Active Directory. When
7392
+ * specified, the custom skill connects to the function or app using a managed ID (either system
7393
+ * or user-assigned) of the search service and the access token of the function or app, using
7394
+ * this value as the resource id for creating the scope of the access token.
7395
+ */
7396
+ authResourceId?: string;
7397
+ /**
7398
+ * The user-assigned managed identity used for outbound connections. If an authResourceId is
7399
+ * provided and it's not specified, the system-assigned managed identity is used. On updates to
7400
+ * the indexer, if the identity is unspecified, the value remains unchanged. If undefined, the
7401
+ * value of this property is cleared.
7402
+ */
7403
+ authIdentity?: SearchIndexerDataIdentity;
5901
7404
  }
5902
7405
 
5903
7406
  /** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */