@azure/search-documents 11.3.0-alpha.20211105.2 → 11.3.0-alpha.20211109.3

This diff compares the content of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
--- index.js.map
+++ index.js.map
@@ -1 +1 @@
- {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC,OAAO,EAAE,YAAY,EAAuB,MAAM,gBAAgB,CAAC;AACnE,OAAO,EACL,kBAAkB,EAClB,oBAAoB,EACpB,mBAAmB,EACpB,MAAM,gCAAgC,CAAC;AAgCxC,OAAO,EAAE,4BAA4B,EAAwB,MAAM,gCAAgC,CAAC;AACpG,OAAO,EAAE,iBAAiB,EAA4B,MAAM,qBAAqB,CAAC;AAClF,OAAO,EAAE,mBAAmB,EAA8B,MAAM,uBAAuB,CAAC;AACxF,OAAO,EAwBL,kBAAkB,EAClB,oBAAoB,EACpB,qBAAqB,EACrB,mBAAmB,EA+CpB,MAAM,iBAAiB,CAAC;AACzB,OAAO,EAAE,OAAO,IAAI,cAAc,EAAE,MAAM,kBAAkB,CAAC;AAC7D,OAAO,EAAE,KAAK,EAAE,MAAM,SAAS,CAAC;AAChC,OAAO,EAAE,mBAAmB,EAAE,MAAM,uBAAuB,CAAC;AAC5D,OAAO,EAYL,YAAY,EAEZ,kBAAkB,EAElB,YAAY,EAOZ,qBAAqB,EACrB,oBAAoB,EACpB,qBAAqB,EACtB,MAAM,yBAAyB,CAAC;AACjC,OAAO,EAEL,eAAe,EAqEf,oCAAoC,EAYpC,2BAA2B,EAE3B,uBAAuB,EAEvB,kBAAkB,EAElB,iCAAiC,EAMjC,mBAAmB,EAEnB,mCAAmC,EAEnC,+BAA+B,EAE/B,gBAAgB,EAEhB,kBAAkB,EAElB,qCAAqC,EAErC,qBAAqB,EAcrB,gCAAgC,EAUhC,wBAAwB,EAKxB,6BAA6B,EAG7B,2BAA2B,EAE3B,2BAA2B,EAE3B,wCAAwC,EAYxC,0BAA0B,EAG1B,oBAAoB,EAEpB,mBAAmB,EAUnB,iCAAiC,EAEjC,eAAe,EAKf,iCAAiC,EAEjC,iBAAiB,EAClB,MAAM,4BAA4B,CAAC;AACpC,OAAO,EAAE,kBAAkB,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,EAAE,wBAAwB,EAAE,MAAM,oBAAoB,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nexport { SearchClient, SearchClientOptions } from \"./searchClient\";\nexport {\n DEFAULT_BATCH_SIZE,\n DEFAULT_FLUSH_WINDOW,\n DEFAULT_RETRY_COUNT\n} from \"./searchIndexingBufferedSender\";\nexport {\n AutocompleteRequest,\n AutocompleteOptions,\n CountDocumentsOptions,\n DeleteDocumentsOptions,\n GetDocumentOptions,\n IndexDocumentsAction,\n ListSearchResultsPageSettings,\n IndexDocumentsOptions,\n SearchDocumentsResultBase,\n SearchDocumentsResult,\n SearchDocumentsPageResult,\n SearchIterator,\n SearchOptions,\n SearchRequestOptions,\n SearchRequest,\n SearchResult,\n SuggestDocumentsResult,\n SuggestRequest,\n SuggestResult,\n SuggestOptions,\n MergeDocumentsOptions,\n MergeOrUploadDocumentsOptions,\n UploadDocumentsOptions,\n SearchIndexingBufferedSenderOptions,\n SearchIndexingBufferedSenderDeleteDocumentsOptions,\n SearchIndexingBufferedSenderFlushDocumentsOptions,\n SearchIndexingBufferedSenderMergeDocumentsOptions,\n SearchIndexingBufferedSenderMergeOrUploadDocumentsOptions,\n SearchIndexingBufferedSenderUploadDocumentsOptions\n} from \"./indexModels\";\nexport { SearchIndexingBufferedSender, IndexDocumentsClient } from \"./searchIndexingBufferedSender\";\nexport { SearchIndexClient, SearchIndexClientOptions } from \"./searchIndexClient\";\nexport { SearchIndexerClient, SearchIndexerClientOptions } from \"./searchIndexerClient\";\nexport {\n SearchIndex,\n LexicalAnalyzer,\n TokenFilter,\n LexicalTokenizer,\n CharFilter,\n ListIndexesOptions,\n CreateIndexOptions,\n CreateOrUpdateIndexOptions,\n CreateOrUpdateSkillsetOptions,\n CreateOrUpdateSynonymMapOptions,\n CreateSkillsetOptions,\n CreateSynonymMapOptions,\n DeleteSkillsetOptions,\n DeleteSynonymMapOptions,\n GetSkillSetOptions,\n GetSynonymMapsOptions,\n ListSkillsetsOptions,\n SearchIndexerSkillset,\n ListSynonymMapsOptions,\n DeleteIndexOptions,\n AnalyzeTextOptions,\n GetIndexOptions,\n GetIndexStatisticsOptions,\n KnownAnalyzerNames,\n KnownCharFilterNames,\n KnownTokenFilterNames,\n KnownTokenizerNames,\n ScoringFunction,\n ScoringProfile,\n CustomAnalyzer,\n PatternAnalyzer,\n PatternTokenizer,\n SearchField,\n SimpleField,\n ComplexField,\n SearchFieldDataType,\n ComplexDataType,\n CognitiveServicesAccount,\n SearchIndexerSkill,\n SynonymMap,\n ListIndexersOptions,\n 
CreateIndexerOptions,\n GetIndexerOptions,\n CreateorUpdateIndexerOptions,\n DeleteIndexerOptions,\n GetIndexerStatusOptions,\n ResetIndexerOptions,\n RunIndexerOptions,\n CreateDataSourceConnectionOptions,\n CreateorUpdateDataSourceConnectionOptions,\n DeleteDataSourceConnectionOptions,\n GetDataSourceConnectionOptions,\n ListDataSourceConnectionsOptions,\n SearchIndexerDataSourceConnection,\n DataChangeDetectionPolicy,\n DataDeletionDetectionPolicy,\n GetServiceStatisticsOptions,\n IndexIterator,\n IndexNameIterator,\n SimilarityAlgorithm,\n NGramTokenFilter,\n LuceneStandardTokenizer,\n EdgeNGramTokenFilter,\n KeywordTokenizer,\n AnalyzeRequest,\n SearchResourceEncryptionKey,\n SearchIndexStatistics,\n SearchServiceStatistics,\n SearchIndexer,\n LexicalNormalizer,\n SearchIndexerDataIdentity,\n ResetDocumentsOptions,\n ResetSkillsOptions\n} from \"./serviceModels\";\nexport { default as GeographyPoint } from \"./geographyPoint\";\nexport { odata } from \"./odata\";\nexport { IndexDocumentsBatch } from \"./indexDocumentsBatch\";\nexport {\n AutocompleteResult,\n AutocompleteMode,\n AutocompleteItem,\n FacetResult,\n IndexActionType,\n IndexDocumentsResult,\n IndexingResult,\n QueryType,\n SearchMode,\n ScoringStatistics,\n Answers,\n KnownAnswers,\n QueryLanguage,\n KnownQueryLanguage,\n Speller,\n KnownSpeller,\n CaptionResult,\n AnswerResult,\n Captions,\n QueryAnswerType,\n QueryCaptionType,\n QuerySpellerType,\n KnownQuerySpellerType,\n KnownQueryAnswerType,\n KnownQueryCaptionType\n} from \"./generated/data/models\";\nexport {\n RegexFlags,\n KnownRegexFlags,\n LuceneStandardAnalyzer,\n StopAnalyzer,\n MappingCharFilter,\n PatternReplaceCharFilter,\n CorsOptions,\n AzureActiveDirectoryApplicationCredentials,\n ScoringFunctionAggregation,\n ScoringFunctionInterpolation,\n DistanceScoringParameters,\n DistanceScoringFunction,\n FreshnessScoringParameters,\n FreshnessScoringFunction,\n MagnitudeScoringParameters,\n MagnitudeScoringFunction,\n TagScoringParameters,\n TagScoringFunction,\n TextWeights,\n AsciiFoldingTokenFilter,\n CjkBigramTokenFilterScripts,\n CjkBigramTokenFilter,\n CommonGramTokenFilter,\n DictionaryDecompounderTokenFilter,\n EdgeNGramTokenFilterSide,\n ElisionTokenFilter,\n KeepTokenFilter,\n KeywordMarkerTokenFilter,\n LengthTokenFilter,\n LimitTokenFilter,\n PatternCaptureTokenFilter,\n PatternReplaceTokenFilter,\n PhoneticEncoder,\n PhoneticTokenFilter,\n ShingleTokenFilter,\n SnowballTokenFilterLanguage,\n SnowballTokenFilter,\n StemmerTokenFilterLanguage,\n StemmerTokenFilter,\n StemmerOverrideTokenFilter,\n StopwordsList,\n StopwordsTokenFilter,\n SynonymTokenFilter,\n TruncateTokenFilter,\n UniqueTokenFilter,\n WordDelimiterTokenFilter,\n ClassicTokenizer,\n TokenCharacterKind,\n EdgeNGramTokenizer,\n MicrosoftTokenizerLanguage,\n MicrosoftLanguageTokenizer,\n MicrosoftStemmingTokenizerLanguage,\n MicrosoftLanguageStemmingTokenizer,\n NGramTokenizer,\n PathHierarchyTokenizerV2 as PathHierarchyTokenizer,\n UaxUrlEmailTokenizer,\n Suggester as SearchSuggester,\n AnalyzeResult,\n AnalyzedTokenInfo,\n ConditionalSkill,\n KeyPhraseExtractionSkill,\n OcrSkill,\n ImageAnalysisSkill,\n LanguageDetectionSkill,\n ShaperSkill,\n MergeSkill,\n EntityRecognitionSkill,\n SentimentSkill,\n CustomEntityLookupSkill,\n CustomEntityLookupSkillLanguage,\n KnownCustomEntityLookupSkillLanguage,\n DocumentExtractionSkill,\n CustomEntity,\n CustomEntityAlias,\n SplitSkill,\n PIIDetectionSkill,\n EntityRecognitionSkillV3,\n EntityLinkingSkill,\n SentimentSkillV3,\n 
TextTranslationSkill,\n WebApiSkill,\n SentimentSkillLanguage,\n KnownSentimentSkillLanguage,\n SplitSkillLanguage,\n KnownSplitSkillLanguage,\n TextSplitMode,\n KnownTextSplitMode,\n TextTranslationSkillLanguage,\n KnownTextTranslationSkillLanguage,\n DefaultCognitiveServicesAccount,\n CognitiveServicesAccountKey,\n InputFieldMappingEntry,\n OutputFieldMappingEntry,\n EntityCategory,\n KnownEntityCategory,\n EntityRecognitionSkillLanguage,\n KnownEntityRecognitionSkillLanguage,\n ImageAnalysisSkillLanguage,\n KnownImageAnalysisSkillLanguage,\n ImageDetail,\n KnownImageDetail,\n VisualFeature,\n KnownVisualFeature,\n KeyPhraseExtractionSkillLanguage,\n KnownKeyPhraseExtractionSkillLanguage,\n OcrSkillLanguage,\n KnownOcrSkillLanguage,\n FieldMapping,\n IndexingParameters,\n IndexingSchedule,\n FieldMappingFunction,\n SearchIndexerStatus,\n IndexerExecutionResult,\n SearchIndexerLimits,\n IndexerStatus,\n SearchIndexerError,\n IndexerExecutionStatus,\n SearchIndexerWarning,\n SearchIndexerDataContainer,\n SearchIndexerDataSourceType,\n KnownSearchIndexerDataSourceType,\n SoftDeleteColumnDeletionDetectionPolicy,\n SqlIntegratedChangeTrackingPolicy,\n HighWaterMarkChangeDetectionPolicy,\n SearchIndexerDataUserAssignedIdentity,\n SearchIndexerDataNoneIdentity,\n ServiceCounters,\n ServiceLimits,\n ResourceCounter,\n LexicalAnalyzerName,\n KnownLexicalAnalyzerName,\n ClassicSimilarity,\n BM25Similarity,\n IndexingParametersConfiguration,\n BlobIndexerDataToExtract,\n KnownBlobIndexerDataToExtract,\n IndexerExecutionEnvironment,\n BlobIndexerImageAction,\n KnownBlobIndexerImageAction,\n BlobIndexerParsingMode,\n KnownBlobIndexerParsingMode,\n BlobIndexerPDFTextRotationAlgorithm,\n KnownBlobIndexerPDFTextRotationAlgorithm,\n TokenFilter as BaseTokenFilter,\n Similarity,\n LexicalTokenizer as BaseLexicalTokenizer,\n CognitiveServicesAccount as BaseCognitiveServicesAccount,\n SearchIndexerSkill as BaseSearchIndexerSkill,\n ScoringFunction as BaseScoringFunction,\n DataChangeDetectionPolicy as BaseDataChangeDetectionPolicy,\n LexicalAnalyzer as BaseLexicalAnalyzer,\n CharFilter as BaseCharFilter,\n DataDeletionDetectionPolicy as BaseDataDeletionDetectionPolicy,\n LexicalNormalizerName,\n KnownLexicalNormalizerName,\n CustomNormalizer,\n TokenFilterName,\n KnownTokenFilterName,\n CharFilterName,\n KnownCharFilterName,\n LexicalNormalizer as BaseLexicalNormalizer,\n SearchIndexerKnowledgeStore,\n SearchIndexerKnowledgeStoreProjection,\n SearchIndexerKnowledgeStoreFileProjectionSelector,\n SearchIndexerKnowledgeStoreBlobProjectionSelector,\n SearchIndexerKnowledgeStoreProjectionSelector,\n SearchIndexerKnowledgeStoreObjectProjectionSelector,\n SearchIndexerKnowledgeStoreTableProjectionSelector,\n PIIDetectionSkillMaskingMode,\n KnownPIIDetectionSkillMaskingMode,\n LineEnding,\n KnownLineEnding,\n SearchIndexerDataIdentity as BaseSearchIndexerDataIdentity,\n SearchIndexerCache,\n IndexerState,\n IndexerExecutionStatusDetail,\n KnownIndexerExecutionStatusDetail,\n IndexingMode,\n KnownIndexingMode\n} from \"./generated/service/models\";\nexport { AzureKeyCredential } from \"@azure/core-auth\";\nexport { createSynonymMapFromFile } from \"./synonymMapHelper\";\n"]}
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC,OAAO,EAAE,YAAY,EAAuB,MAAM,gBAAgB,CAAC;AACnE,OAAO,EACL,kBAAkB,EAClB,oBAAoB,EACpB,mBAAmB,EACpB,MAAM,gCAAgC,CAAC;AAgCxC,OAAO,EAAE,4BAA4B,EAAwB,MAAM,gCAAgC,CAAC;AACpG,OAAO,EAAE,iBAAiB,EAA4B,MAAM,qBAAqB,CAAC;AAClF,OAAO,EAAE,mBAAmB,EAA8B,MAAM,uBAAuB,CAAC;AACxF,OAAO,EAwBL,kBAAkB,EAClB,oBAAoB,EACpB,qBAAqB,EACrB,mBAAmB,EA+CpB,MAAM,iBAAiB,CAAC;AACzB,OAAO,EAAE,OAAO,IAAI,cAAc,EAAE,MAAM,kBAAkB,CAAC;AAC7D,OAAO,EAAE,KAAK,EAAE,MAAM,SAAS,CAAC;AAChC,OAAO,EAAE,mBAAmB,EAAE,MAAM,uBAAuB,CAAC;AAC5D,OAAO,EAYL,YAAY,EAEZ,kBAAkB,EAElB,YAAY,EAOZ,qBAAqB,EACrB,oBAAoB,EACpB,qBAAqB,EACtB,MAAM,yBAAyB,CAAC;AACjC,OAAO,EAEL,eAAe,EAqEf,oCAAoC,EAYpC,2BAA2B,EAE3B,uBAAuB,EAEvB,kBAAkB,EAElB,iCAAiC,EAMjC,mBAAmB,EAEnB,mCAAmC,EAEnC,+BAA+B,EAE/B,gBAAgB,EAEhB,kBAAkB,EAElB,qCAAqC,EAErC,qBAAqB,EAcrB,gCAAgC,EAUhC,wBAAwB,EAKxB,6BAA6B,EAG7B,2BAA2B,EAE3B,2BAA2B,EAE3B,wCAAwC,EAYxC,0BAA0B,EAG1B,oBAAoB,EAEpB,mBAAmB,EAUnB,iCAAiC,EAEjC,eAAe,EAKf,iCAAiC,EAEjC,iBAAiB,EAKlB,MAAM,4BAA4B,CAAC;AACpC,OAAO,EAAE,kBAAkB,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,EAAE,wBAAwB,EAAE,MAAM,oBAAoB,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nexport { SearchClient, SearchClientOptions } from \"./searchClient\";\nexport {\n DEFAULT_BATCH_SIZE,\n DEFAULT_FLUSH_WINDOW,\n DEFAULT_RETRY_COUNT\n} from \"./searchIndexingBufferedSender\";\nexport {\n AutocompleteRequest,\n AutocompleteOptions,\n CountDocumentsOptions,\n DeleteDocumentsOptions,\n GetDocumentOptions,\n IndexDocumentsAction,\n ListSearchResultsPageSettings,\n IndexDocumentsOptions,\n SearchDocumentsResultBase,\n SearchDocumentsResult,\n SearchDocumentsPageResult,\n SearchIterator,\n SearchOptions,\n SearchRequestOptions,\n SearchRequest,\n SearchResult,\n SuggestDocumentsResult,\n SuggestRequest,\n SuggestResult,\n SuggestOptions,\n MergeDocumentsOptions,\n MergeOrUploadDocumentsOptions,\n UploadDocumentsOptions,\n SearchIndexingBufferedSenderOptions,\n SearchIndexingBufferedSenderDeleteDocumentsOptions,\n SearchIndexingBufferedSenderFlushDocumentsOptions,\n SearchIndexingBufferedSenderMergeDocumentsOptions,\n SearchIndexingBufferedSenderMergeOrUploadDocumentsOptions,\n SearchIndexingBufferedSenderUploadDocumentsOptions\n} from \"./indexModels\";\nexport { SearchIndexingBufferedSender, IndexDocumentsClient } from \"./searchIndexingBufferedSender\";\nexport { SearchIndexClient, SearchIndexClientOptions } from \"./searchIndexClient\";\nexport { SearchIndexerClient, SearchIndexerClientOptions } from \"./searchIndexerClient\";\nexport {\n SearchIndex,\n LexicalAnalyzer,\n TokenFilter,\n LexicalTokenizer,\n CharFilter,\n ListIndexesOptions,\n CreateIndexOptions,\n CreateOrUpdateIndexOptions,\n CreateOrUpdateSkillsetOptions,\n CreateOrUpdateSynonymMapOptions,\n CreateSkillsetOptions,\n CreateSynonymMapOptions,\n DeleteSkillsetOptions,\n DeleteSynonymMapOptions,\n GetSkillSetOptions,\n GetSynonymMapsOptions,\n ListSkillsetsOptions,\n SearchIndexerSkillset,\n ListSynonymMapsOptions,\n DeleteIndexOptions,\n AnalyzeTextOptions,\n GetIndexOptions,\n GetIndexStatisticsOptions,\n KnownAnalyzerNames,\n KnownCharFilterNames,\n KnownTokenFilterNames,\n KnownTokenizerNames,\n ScoringFunction,\n ScoringProfile,\n CustomAnalyzer,\n PatternAnalyzer,\n PatternTokenizer,\n SearchField,\n SimpleField,\n ComplexField,\n SearchFieldDataType,\n ComplexDataType,\n CognitiveServicesAccount,\n SearchIndexerSkill,\n SynonymMap,\n ListIndexersOptions,\n 
CreateIndexerOptions,\n GetIndexerOptions,\n CreateorUpdateIndexerOptions,\n DeleteIndexerOptions,\n GetIndexerStatusOptions,\n ResetIndexerOptions,\n RunIndexerOptions,\n CreateDataSourceConnectionOptions,\n CreateorUpdateDataSourceConnectionOptions,\n DeleteDataSourceConnectionOptions,\n GetDataSourceConnectionOptions,\n ListDataSourceConnectionsOptions,\n SearchIndexerDataSourceConnection,\n DataChangeDetectionPolicy,\n DataDeletionDetectionPolicy,\n GetServiceStatisticsOptions,\n IndexIterator,\n IndexNameIterator,\n SimilarityAlgorithm,\n NGramTokenFilter,\n LuceneStandardTokenizer,\n EdgeNGramTokenFilter,\n KeywordTokenizer,\n AnalyzeRequest,\n SearchResourceEncryptionKey,\n SearchIndexStatistics,\n SearchServiceStatistics,\n SearchIndexer,\n LexicalNormalizer,\n SearchIndexerDataIdentity,\n ResetDocumentsOptions,\n ResetSkillsOptions\n} from \"./serviceModels\";\nexport { default as GeographyPoint } from \"./geographyPoint\";\nexport { odata } from \"./odata\";\nexport { IndexDocumentsBatch } from \"./indexDocumentsBatch\";\nexport {\n AutocompleteResult,\n AutocompleteMode,\n AutocompleteItem,\n FacetResult,\n IndexActionType,\n IndexDocumentsResult,\n IndexingResult,\n QueryType,\n SearchMode,\n ScoringStatistics,\n Answers,\n KnownAnswers,\n QueryLanguage,\n KnownQueryLanguage,\n Speller,\n KnownSpeller,\n CaptionResult,\n AnswerResult,\n Captions,\n QueryAnswerType,\n QueryCaptionType,\n QuerySpellerType,\n KnownQuerySpellerType,\n KnownQueryAnswerType,\n KnownQueryCaptionType\n} from \"./generated/data/models\";\nexport {\n RegexFlags,\n KnownRegexFlags,\n LuceneStandardAnalyzer,\n StopAnalyzer,\n MappingCharFilter,\n PatternReplaceCharFilter,\n CorsOptions,\n AzureActiveDirectoryApplicationCredentials,\n ScoringFunctionAggregation,\n ScoringFunctionInterpolation,\n DistanceScoringParameters,\n DistanceScoringFunction,\n FreshnessScoringParameters,\n FreshnessScoringFunction,\n MagnitudeScoringParameters,\n MagnitudeScoringFunction,\n TagScoringParameters,\n TagScoringFunction,\n TextWeights,\n AsciiFoldingTokenFilter,\n CjkBigramTokenFilterScripts,\n CjkBigramTokenFilter,\n CommonGramTokenFilter,\n DictionaryDecompounderTokenFilter,\n EdgeNGramTokenFilterSide,\n ElisionTokenFilter,\n KeepTokenFilter,\n KeywordMarkerTokenFilter,\n LengthTokenFilter,\n LimitTokenFilter,\n PatternCaptureTokenFilter,\n PatternReplaceTokenFilter,\n PhoneticEncoder,\n PhoneticTokenFilter,\n ShingleTokenFilter,\n SnowballTokenFilterLanguage,\n SnowballTokenFilter,\n StemmerTokenFilterLanguage,\n StemmerTokenFilter,\n StemmerOverrideTokenFilter,\n StopwordsList,\n StopwordsTokenFilter,\n SynonymTokenFilter,\n TruncateTokenFilter,\n UniqueTokenFilter,\n WordDelimiterTokenFilter,\n ClassicTokenizer,\n TokenCharacterKind,\n EdgeNGramTokenizer,\n MicrosoftTokenizerLanguage,\n MicrosoftLanguageTokenizer,\n MicrosoftStemmingTokenizerLanguage,\n MicrosoftLanguageStemmingTokenizer,\n NGramTokenizer,\n PathHierarchyTokenizerV2 as PathHierarchyTokenizer,\n UaxUrlEmailTokenizer,\n Suggester as SearchSuggester,\n AnalyzeResult,\n AnalyzedTokenInfo,\n ConditionalSkill,\n KeyPhraseExtractionSkill,\n OcrSkill,\n ImageAnalysisSkill,\n LanguageDetectionSkill,\n ShaperSkill,\n MergeSkill,\n EntityRecognitionSkill,\n SentimentSkill,\n CustomEntityLookupSkill,\n CustomEntityLookupSkillLanguage,\n KnownCustomEntityLookupSkillLanguage,\n DocumentExtractionSkill,\n CustomEntity,\n CustomEntityAlias,\n SplitSkill,\n PIIDetectionSkill,\n EntityRecognitionSkillV3,\n EntityLinkingSkill,\n SentimentSkillV3,\n 
TextTranslationSkill,\n WebApiSkill,\n SentimentSkillLanguage,\n KnownSentimentSkillLanguage,\n SplitSkillLanguage,\n KnownSplitSkillLanguage,\n TextSplitMode,\n KnownTextSplitMode,\n TextTranslationSkillLanguage,\n KnownTextTranslationSkillLanguage,\n DefaultCognitiveServicesAccount,\n CognitiveServicesAccountKey,\n InputFieldMappingEntry,\n OutputFieldMappingEntry,\n EntityCategory,\n KnownEntityCategory,\n EntityRecognitionSkillLanguage,\n KnownEntityRecognitionSkillLanguage,\n ImageAnalysisSkillLanguage,\n KnownImageAnalysisSkillLanguage,\n ImageDetail,\n KnownImageDetail,\n VisualFeature,\n KnownVisualFeature,\n KeyPhraseExtractionSkillLanguage,\n KnownKeyPhraseExtractionSkillLanguage,\n OcrSkillLanguage,\n KnownOcrSkillLanguage,\n FieldMapping,\n IndexingParameters,\n IndexingSchedule,\n FieldMappingFunction,\n SearchIndexerStatus,\n IndexerExecutionResult,\n SearchIndexerLimits,\n IndexerStatus,\n SearchIndexerError,\n IndexerExecutionStatus,\n SearchIndexerWarning,\n SearchIndexerDataContainer,\n SearchIndexerDataSourceType,\n KnownSearchIndexerDataSourceType,\n SoftDeleteColumnDeletionDetectionPolicy,\n SqlIntegratedChangeTrackingPolicy,\n HighWaterMarkChangeDetectionPolicy,\n SearchIndexerDataUserAssignedIdentity,\n SearchIndexerDataNoneIdentity,\n ServiceCounters,\n ServiceLimits,\n ResourceCounter,\n LexicalAnalyzerName,\n KnownLexicalAnalyzerName,\n ClassicSimilarity,\n BM25Similarity,\n IndexingParametersConfiguration,\n BlobIndexerDataToExtract,\n KnownBlobIndexerDataToExtract,\n IndexerExecutionEnvironment,\n BlobIndexerImageAction,\n KnownBlobIndexerImageAction,\n BlobIndexerParsingMode,\n KnownBlobIndexerParsingMode,\n BlobIndexerPDFTextRotationAlgorithm,\n KnownBlobIndexerPDFTextRotationAlgorithm,\n TokenFilter as BaseTokenFilter,\n Similarity,\n LexicalTokenizer as BaseLexicalTokenizer,\n CognitiveServicesAccount as BaseCognitiveServicesAccount,\n SearchIndexerSkill as BaseSearchIndexerSkill,\n ScoringFunction as BaseScoringFunction,\n DataChangeDetectionPolicy as BaseDataChangeDetectionPolicy,\n LexicalAnalyzer as BaseLexicalAnalyzer,\n CharFilter as BaseCharFilter,\n DataDeletionDetectionPolicy as BaseDataDeletionDetectionPolicy,\n LexicalNormalizerName,\n KnownLexicalNormalizerName,\n CustomNormalizer,\n TokenFilterName,\n KnownTokenFilterName,\n CharFilterName,\n KnownCharFilterName,\n LexicalNormalizer as BaseLexicalNormalizer,\n SearchIndexerKnowledgeStore,\n SearchIndexerKnowledgeStoreProjection,\n SearchIndexerKnowledgeStoreFileProjectionSelector,\n SearchIndexerKnowledgeStoreBlobProjectionSelector,\n SearchIndexerKnowledgeStoreProjectionSelector,\n SearchIndexerKnowledgeStoreObjectProjectionSelector,\n SearchIndexerKnowledgeStoreTableProjectionSelector,\n PIIDetectionSkillMaskingMode,\n KnownPIIDetectionSkillMaskingMode,\n LineEnding,\n KnownLineEnding,\n SearchIndexerDataIdentity as BaseSearchIndexerDataIdentity,\n SearchIndexerCache,\n IndexerState,\n IndexerExecutionStatusDetail,\n KnownIndexerExecutionStatusDetail,\n IndexingMode,\n KnownIndexingMode,\n SemanticSettings,\n SemanticConfiguration,\n PrioritizedFields,\n SemanticField\n} from \"./generated/service/models\";\nexport { AzureKeyCredential } from \"@azure/core-auth\";\nexport { createSynonymMapFromFile } from \"./synonymMapHelper\";\n"]}
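Decoded, the hunk above is a one-line change to the source map for index.js: the rebuilt bundle now re-exports four semantic search model types from ./generated/service/models, namely SemanticSettings, SemanticConfiguration, PrioritizedFields, and SemanticField. The diff confirms only the export names. The sketch below wires them together to show how a semantic configuration is described; the property shapes (name, prioritizedFields, titleField, and so on) follow the 2021-04-30-Preview semantic search surface and are assumptions, not part of this diff.

// Sketch only: export names are confirmed by the hunk above, field
// shapes are assumed from the 2021-04-30-Preview API surface.
import type {
  PrioritizedFields,
  SemanticConfiguration,
  SemanticField,
  SemanticSettings
} from "@azure/search-documents";

// A semantic field just names an index field (field names here are
// placeholders for illustration).
const titleField: SemanticField = { name: "hotelName" };

// Prioritized fields tell the semantic ranker which fields carry a
// document's title, body content, and keywords.
const prioritizedFields: PrioritizedFields = {
  titleField,
  prioritizedContentFields: [{ name: "description" }],
  prioritizedKeywordsFields: [{ name: "tags" }]
};

// SemanticSettings hangs one or more named configurations off the index
// definition; a query later selects one by name.
const semanticSettings: SemanticSettings = {
  configurations: [{ name: "default", prioritizedFields }]
};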
--- indexModels.js.map
+++ indexModels.js.map
@@ -1 +1 @@
- {"version":3,"file":"indexModels.js","sourceRoot":"","sources":["../../src/indexModels.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;;AAwqBlC,6CAA6C","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport { OperationOptions } from \"@azure/core-client\";\nimport {\n QueryType,\n SearchMode,\n FacetResult,\n AutocompleteMode,\n IndexActionType,\n ScoringStatistics,\n QueryLanguage,\n Speller,\n Answers,\n CaptionResult,\n AnswerResult,\n Captions,\n QuerySpellerType,\n QueryAnswerType,\n QueryCaptionType\n} from \"./generated/data/models\";\nimport { PagedAsyncIterableIterator } from \"@azure/core-paging\";\n\n/**\n * Options for performing the count operation on the index.\n */\nexport type CountDocumentsOptions = OperationOptions;\n/**\n * Options for retrieving completion text for a partial searchText.\n */\nexport type AutocompleteOptions<Fields> = OperationOptions & AutocompleteRequest<Fields>;\n/**\n * Options for committing a full search request.\n */\nexport type SearchOptions<Fields> = OperationOptions & SearchRequestOptions<Fields>;\n/**\n * Options for retrieving suggestions based on the searchText.\n */\nexport type SuggestOptions<Fields> = OperationOptions & SuggestRequest<Fields>;\n\n/**\n * Options for SearchIndexingBufferedSender.\n */\nexport interface SearchIndexingBufferedSenderOptions {\n /**\n * Indicates if autoFlush is enabled.\n */\n autoFlush?: boolean;\n /**\n * Initial Batch Action Count.\n *\n * A batch request will be sent once the documents\n * reach the initialBatchActionCount.\n */\n initialBatchActionCount?: number;\n /**\n * Flush Window.\n *\n * A batch request will be sent after flushWindowInMs is\n * reached.\n */\n flushWindowInMs?: number;\n /**\n * Maximum number of Retries\n */\n maxRetriesPerAction?: number;\n /**\n * Delay between retries\n */\n throttlingDelayInMs?: number;\n /**\n * Max Delay between retries\n */\n maxThrottlingDelayInMs?: number;\n}\n\n/**\n * Options for SearchIndexingBufferedSenderUploadDocuments.\n */\nexport type SearchIndexingBufferedSenderUploadDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderMergeDocuments.\n */\nexport type SearchIndexingBufferedSenderMergeDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderMergeOrUploadDocuments.\n */\nexport type SearchIndexingBufferedSenderMergeOrUploadDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderDeleteDocuments.\n */\nexport type SearchIndexingBufferedSenderDeleteDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderFlushDocuments.\n */\nexport type SearchIndexingBufferedSenderFlushDocumentsOptions = OperationOptions;\n\n/**\n * Options for retrieving a single document.\n */\nexport interface GetDocumentOptions<Fields> extends OperationOptions {\n /**\n * List of field names to retrieve for the document; Any field not retrieved will be missing from\n * the returned document.\n */\n selectedFields?: Fields[];\n}\n\n/**\n * Options for the modify index batch operation.\n */\nexport interface IndexDocumentsOptions extends OperationOptions {\n /**\n * If true, will cause this operation to throw if any document operation\n * in the batch did not succeed.\n */\n throwOnAnyFailure?: boolean;\n}\n\n/**\n * Options for the upload documents operation.\n */\nexport type UploadDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Options for the merge documents operation.\n */\nexport type 
MergeDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Options for the merge or upload documents operation.\n */\nexport type MergeOrUploadDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Options for the delete documents operation.\n */\nexport type DeleteDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Arguments for retrieving the next page of search results.\n */\nexport interface ListSearchResultsPageSettings {\n /**\n * A token used for retrieving the next page of results when the server\n * enforces pagination.\n */\n continuationToken?: string;\n}\n\n/**\n * An iterator for search results of a paticular query. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\nexport type SearchIterator<Fields> = PagedAsyncIterableIterator<\n SearchResult<Fields>,\n SearchDocumentsPageResult<Fields>,\n ListSearchResultsPageSettings\n>;\n\n// BEGIN manually modified generated interfaces\n//\n// This section is for places where we have to manually fix issues\n// with interfaces from the generated code.\n// Mostly this is to allow modeling additionalProperties:true as generics.\n\n/**\n * Parameters for filtering, sorting, faceting, paging, and other search query behaviors.\n */\nexport interface SearchRequest {\n /**\n * A value that specifies whether to fetch the total count of results. Default is false. Setting\n * this value to true may have a performance impact. Note that the count returned is an\n * approximation.\n */\n includeTotalCount?: boolean;\n /**\n * The list of facet expressions to apply to the search query. Each facet expression contains a\n * field name, optionally followed by a comma-separated list of name:value pairs.\n */\n facets?: string[];\n /**\n * The OData $filter expression to apply to the search query.\n */\n filter?: string;\n /**\n * The comma-separated list of field names to use for hit highlights. Only searchable fields can\n * be used for hit highlighting.\n */\n highlightFields?: string;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is\n * &lt;/em&gt;.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default\n * is &lt;em&gt;.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by a\n * search query in order for the query to be reported as a success. This parameter can be useful\n * for ensuring search availability even for services with only one replica. The default is 100.\n */\n minimumCoverage?: number;\n /**\n * The comma-separated list of OData $orderby expressions by which to sort the results. Each\n * expression can be either a field name or a call to either the geo.distance() or the\n * search.score() functions. Each expression can be followed by asc to indicate ascending, or\n * desc to indicate descending. The default is ascending order. Ties will be broken by the match\n * scores of documents. If no $orderby is specified, the default sort order is descending by\n * document match score. There can be at most 32 $orderby clauses.\n */\n orderBy?: string;\n /**\n * A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if\n * your query uses the Lucene query syntax. 
Possible values include: 'Simple', 'Full'\n */\n queryType?: QueryType;\n /**\n * A value that specifies whether we want to calculate scoring statistics (such as document\n * frequency) globally for more consistent scoring, or locally, for lower latency. The default is\n * 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global\n * scoring statistics can increase latency of search queries. Possible values include: 'Local',\n * 'Global'\n */\n scoringStatistics?: ScoringStatistics;\n /**\n * A value to be used to create a sticky session, which can help getting more consistent results.\n * As long as the same sessionId is used, a best-effort attempt will be made to target the same\n * replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the\n * load balancing of the requests across replicas and adversely affect the performance of the\n * search service. The value used as sessionId cannot start with a '_' character.\n */\n sessionId?: string;\n /**\n * The list of parameter values to be used in scoring functions (for example,\n * referencePointParameter) using the format name-values. For example, if the scoring profile\n * defines a function with a parameter called 'mylocation' the parameter string would be\n * \"mylocation--122.2,44.8\" (without the quotes).\n */\n scoringParameters?: string[];\n /**\n * The name of a scoring profile to evaluate match scores for matching documents in order to sort\n * the results.\n */\n scoringProfile?: string;\n /**\n * A full-text search query expression; Use \"*\" or omit this parameter to match all documents.\n */\n searchText?: string;\n /**\n * The comma-separated list of field names to which to scope the full-text search. When using\n * fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each\n * fielded search expression take precedence over any field names listed in this parameter.\n */\n searchFields?: string;\n /**\n * A value that specifies whether any or all of the search terms must be matched in order to\n * count the document as a match. Possible values include: 'Any', 'All'\n */\n searchMode?: SearchMode;\n /**\n * A value that specifies the language of the search query.\n */\n queryLanguage?: QueryLanguage;\n /**\n * A value that specified the type of the speller to use to spell-correct individual search\n * query terms.\n */\n speller?: QuerySpellerType;\n /**\n * A value that specifies whether answers should be returned as part of the search response.\n */\n answers?: QueryAnswerType;\n /**\n * The comma-separated list of fields to retrieve. If unspecified, all fields marked as\n * retrievable in the schema are included.\n */\n select?: string;\n /**\n * The number of search results to skip. This value cannot be greater than 100,000. If you need\n * to scan documents in sequence, but cannot use skip due to this limitation, consider using\n * orderby on a totally-ordered key and filter with a range query instead.\n */\n skip?: number;\n /**\n * The number of search results to retrieve. This can be used in conjunction with $skip to\n * implement client-side paging of search results. 
If results are truncated due to server-side\n * paging, the response will include a continuation token that can be used to issue another\n * Search request for the next page of results.\n */\n top?: number;\n /**\n * A value that specifies whether captions should be returned as part of the search response.\n */\n captions?: QueryCaptionType;\n /**\n * The comma-separated list of field names used for semantic search.\n */\n semanticFields?: string;\n}\n\n/**\n * Parameters for filtering, sorting, faceting, paging, and other search query behaviors.\n */\nexport interface SearchRequestOptions<Fields> {\n /**\n * A value that specifies whether to fetch the total count of results. Default is false. Setting\n * this value to true may have a performance impact. Note that the count returned is an\n * approximation.\n */\n includeTotalCount?: boolean;\n /**\n * The list of facet expressions to apply to the search query. Each facet expression contains a\n * field name, optionally followed by a comma-separated list of name:value pairs.\n */\n facets?: string[];\n /**\n * The OData $filter expression to apply to the search query.\n */\n filter?: string;\n /**\n * The comma-separated list of field names to use for hit highlights. Only searchable fields can\n * be used for hit highlighting.\n */\n highlightFields?: string;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is\n * &lt;/em&gt;.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default\n * is &lt;em&gt;.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by a\n * search query in order for the query to be reported as a success. This parameter can be useful\n * for ensuring search availability even for services with only one replica. The default is 100.\n */\n minimumCoverage?: number;\n /**\n * The list of OData $orderby expressions by which to sort the results. Each\n * expression can be either a field name or a call to either the geo.distance() or the\n * search.score() functions. Each expression can be followed by asc to indicate ascending, or\n * desc to indicate descending. The default is ascending order. Ties will be broken by the match\n * scores of documents. If no $orderby is specified, the default sort order is descending by\n * document match score. There can be at most 32 $orderby clauses.\n */\n orderBy?: string[];\n /**\n * A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if\n * your query uses the Lucene query syntax. Possible values include: 'simple', 'full'\n */\n queryType?: QueryType;\n /**\n * The list of parameter values to be used in scoring functions (for example,\n * referencePointParameter) using the format name-values. For example, if the scoring profile\n * defines a function with a parameter called 'mylocation' the parameter string would be\n * \"mylocation--122.2,44.8\" (without the quotes).\n */\n scoringParameters?: string[];\n /**\n * The name of a scoring profile to evaluate match scores for matching documents in order to sort\n * the results.\n */\n scoringProfile?: string;\n /**\n * The comma-separated list of field names to which to scope the full-text search. 
When using\n * fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each\n * fielded search expression take precedence over any field names listed in this parameter.\n */\n searchFields?: Fields[];\n /**\n * The language of the query.\n */\n queryLanguage?: QueryLanguage;\n /**\n * Improve search recall by spell-correcting individual search query terms.\n */\n speller?: Speller;\n /**\n * This parameter is only valid if the query type is 'semantic'. If set, the query returns answers\n * extracted from key passages in the highest ranked documents. The number of answers returned can\n * be configured by appending the pipe character '|' followed by the 'count-\\<number of answers\\>' option\n * after the answers parameter value, such as 'extractive|count-3'. Default count is 1.\n */\n answers?: Answers;\n /**\n * A value that specifies whether any or all of the search terms must be matched in order to\n * count the document as a match. Possible values include: 'any', 'all'\n */\n searchMode?: SearchMode;\n /**\n * A value that specifies whether we want to calculate scoring statistics (such as document\n * frequency) globally for more consistent scoring, or locally, for lower latency. Possible\n * values include: 'Local', 'Global'\n */\n scoringStatistics?: ScoringStatistics;\n /**\n * A value to be used to create a sticky session, which can help to get more consistent results.\n * As long as the same sessionId is used, a best-effort attempt will be made to target the same\n * replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the\n * load balancing of the requests across replicas and adversely affect the performance of the\n * search service. The value used as sessionId cannot start with a '_' character.\n */\n sessionId?: string;\n /**\n * The list of fields to retrieve. If unspecified, all fields marked as\n * retrievable in the schema are included.\n */\n select?: Fields[];\n /**\n * The number of search results to skip. This value cannot be greater than 100,000. If you need\n * to scan documents in sequence, but cannot use skip due to this limitation, consider using\n * orderby on a totally-ordered key and filter with a range query instead.\n */\n skip?: number;\n /**\n * The number of search results to retrieve. This can be used in conjunction with $skip to\n * implement client-side paging of search results. If results are truncated due to server-side\n * paging, the response will include a continuation token that can be used to issue another\n * Search request for the next page of results.\n */\n top?: number;\n /**\n * This parameter is only valid if the query type is 'semantic'. If set, the query returns captions\n * extracted from key passages in the highest ranked documents. When Captions is set to 'extractive',\n * highlighting is enabled by default, and can be configured by appending the pipe character '|'\n * followed by the 'highlight-true'/'highlight-false' option, such as 'extractive|highlight-true'. Defaults to 'None'.\n */\n captions?: Captions;\n /**\n * The list of field names used for semantic search.\n */\n semanticFields?: string[];\n}\n\n/**\n * Contains a document found by a search query, plus associated metadata.\n */\nexport type SearchResult<T> = {\n /**\n * The relevance score of the document compared to other documents returned by the query.\n * **NOTE: This property will not be serialized. 
It can only be populated by the server.**\n */\n readonly score: number;\n /**\n * The relevance score computed by the semantic ranker for the top search results. Search results are sorted by the RerankerScore first and then by the Score. RerankerScore is only returned for queries of type 'semantic'.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly rerankerScore?: number;\n /**\n * Text fragments from the document that indicate the matching search terms, organized by each\n * applicable field; null if hit highlighting was not enabled for the query.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly highlights?: { [k in keyof T]?: string[] };\n /**\n * Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly captions?: CaptionResult[];\n\n document: T;\n};\n\n/**\n * Response containing search results from an index.\n */\nexport interface SearchDocumentsResultBase {\n /**\n * The total count of results found by the search operation, or null if the count was not\n * requested. If present, the count may be greater than the number of results in this response.\n * This can happen if you use the $top or $skip parameters, or if Azure Cognitive Search can't\n * return all the requested documents in a single Search response.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly count?: number;\n /**\n * A value indicating the percentage of the index that was included in the query, or null if\n * minimumCoverage was not specified in the request.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly coverage?: number;\n /**\n * The facet query results for the search operation, organized as a collection of buckets for\n * each faceted field; null if the query did not include any facet expressions.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly facets?: { [propertyName: string]: FacetResult[] };\n /**\n * The answers query results for the search operation; null if the answers query parameter was\n * not specified or set to 'none'.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly answers?: AnswerResult[];\n}\n\n/**\n * Response containing search results from an index.\n */\nexport interface SearchDocumentsResult<T> extends SearchDocumentsResultBase {\n /**\n * The sequence of results returned by the query.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly results: SearchIterator<T>;\n}\n\n/**\n * Response containing search page results from an index.\n */\nexport interface SearchDocumentsPageResult<T> extends SearchDocumentsResultBase {\n /**\n * The sequence of results returned by the query.\n * **NOTE: This property will not be serialized. 
It can only be populated by the server.**\n */\n readonly results: SearchResult<T>[];\n /**\n * A token used for retrieving the next page of results when the server\n * enforces pagination.\n */\n continuationToken?: string;\n}\n\n/**\n * Parameters for filtering, sorting, fuzzy matching, and other suggestions query behaviors.\n */\nexport interface SuggestRequest<Fields> {\n /**\n * An OData expression that filters the documents considered for suggestions.\n */\n filter?: string;\n /**\n * A value indicating whether to use fuzzy matching for the suggestion query. Default is false.\n * When set to true, the query will find suggestions even if there's a substituted or missing\n * character in the search text. While this provides a better experience in some scenarios, it\n * comes at a performance cost as fuzzy suggestion searches are slower and consume more\n * resources.\n */\n useFuzzyMatching?: boolean;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted,\n * hit highlighting of suggestions is disabled.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If\n * omitted, hit highlighting of suggestions is disabled.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by a\n * suggestion query in order for the query to be reported as a success. This parameter can be\n * useful for ensuring search availability even for services with only one replica. The default\n * is 80.\n */\n minimumCoverage?: number;\n /**\n * The list of OData $orderby expressions by which to sort the results. Each\n * expression can be either a field name or a call to either the geo.distance() or the\n * search.score() functions. Each expression can be followed by asc to indicate ascending, or\n * desc to indicate descending. The default is ascending order. Ties will be broken by the match\n * scores of documents. If no $orderby is specified, the default sort order is descending by\n * document match score. There can be at most 32 $orderby clauses.\n */\n orderBy?: string[];\n /**\n * The comma-separated list of field names to search for the specified search text. Target fields\n * must be included in the specified suggester.\n */\n searchFields?: Fields[];\n /**\n * The list of fields to retrieve. If unspecified, only the key field will be\n * included in the results.\n */\n select?: Fields[];\n /**\n * The number of suggestions to retrieve. This must be a value between 1 and 100. The default is\n * 5.\n */\n top?: number;\n}\n\n/**\n * A result containing a document found by a suggestion query, plus associated metadata.\n */\nexport type SuggestResult<T> = {\n /**\n * The text of the suggestion result.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly text: string;\n document: T;\n};\n\n/**\n * Response containing suggestion query results from an index.\n */\nexport interface SuggestDocumentsResult<T> {\n /**\n * The sequence of results returned by the query.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly results: SuggestResult<T>[];\n /**\n * A value indicating the percentage of the index that was included in the query, or null if\n * minimumCoverage was not set in the request.\n * **NOTE: This property will not be serialized. 
It can only be populated by the server.**\n */\n readonly coverage?: number;\n}\n\n/**\n * Parameters for fuzzy matching, and other autocomplete query behaviors.\n */\nexport interface AutocompleteRequest<Fields> {\n /**\n * Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles\n * and 'oneTermWithContext' to use the current context while producing auto-completed terms.\n * Possible values include: 'oneTerm', 'twoTerms', 'oneTermWithContext'\n */\n autocompleteMode?: AutocompleteMode;\n /**\n * An OData expression that filters the documents used to produce completed terms for the\n * Autocomplete result.\n */\n filter?: string;\n /**\n * A value indicating whether to use fuzzy matching for the autocomplete query. Default is false.\n * When set to true, the query will autocomplete terms even if there's a substituted or missing\n * character in the search text. While this provides a better experience in some scenarios, it\n * comes at a performance cost as fuzzy autocomplete queries are slower and consume more\n * resources.\n */\n useFuzzyMatching?: boolean;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted,\n * hit highlighting is disabled.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If\n * omitted, hit highlighting is disabled.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by an\n * autocomplete query in order for the query to be reported as a success. This parameter can be\n * useful for ensuring search availability even for services with only one replica. The default\n * is 80.\n */\n minimumCoverage?: number;\n /**\n * The comma-separated list of field names to consider when querying for auto-completed terms.\n * Target fields must be included in the specified suggester.\n */\n searchFields?: Fields[];\n /**\n * The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The\n * default is 5.\n */\n top?: number;\n}\n\n/**\n * Represents an index action that operates on a document.\n */\nexport type IndexDocumentsAction<T> = {\n /**\n * The operation to perform on a document in an indexing batch. Possible values include:\n * 'upload', 'merge', 'mergeOrUpload', 'delete'\n */\n __actionType: IndexActionType;\n} & Partial<T>;\n\n// END manually modified generated interfaces\n"]}
+ {"version":3,"file":"indexModels.js","sourceRoot":"","sources":["../../src/indexModels.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;;AA6qBlC,6CAA6C","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport { OperationOptions } from \"@azure/core-client\";\nimport {\n QueryType,\n SearchMode,\n FacetResult,\n AutocompleteMode,\n IndexActionType,\n ScoringStatistics,\n QueryLanguage,\n Speller,\n Answers,\n CaptionResult,\n AnswerResult,\n Captions,\n QuerySpellerType,\n QueryAnswerType,\n QueryCaptionType\n} from \"./generated/data/models\";\nimport { PagedAsyncIterableIterator } from \"@azure/core-paging\";\n\n/**\n * Options for performing the count operation on the index.\n */\nexport type CountDocumentsOptions = OperationOptions;\n/**\n * Options for retrieving completion text for a partial searchText.\n */\nexport type AutocompleteOptions<Fields> = OperationOptions & AutocompleteRequest<Fields>;\n/**\n * Options for committing a full search request.\n */\nexport type SearchOptions<Fields> = OperationOptions & SearchRequestOptions<Fields>;\n/**\n * Options for retrieving suggestions based on the searchText.\n */\nexport type SuggestOptions<Fields> = OperationOptions & SuggestRequest<Fields>;\n\n/**\n * Options for SearchIndexingBufferedSender.\n */\nexport interface SearchIndexingBufferedSenderOptions {\n /**\n * Indicates if autoFlush is enabled.\n */\n autoFlush?: boolean;\n /**\n * Initial Batch Action Count.\n *\n * A batch request will be sent once the documents\n * reach the initialBatchActionCount.\n */\n initialBatchActionCount?: number;\n /**\n * Flush Window.\n *\n * A batch request will be sent after flushWindowInMs is\n * reached.\n */\n flushWindowInMs?: number;\n /**\n * Maximum number of Retries\n */\n maxRetriesPerAction?: number;\n /**\n * Delay between retries\n */\n throttlingDelayInMs?: number;\n /**\n * Max Delay between retries\n */\n maxThrottlingDelayInMs?: number;\n}\n\n/**\n * Options for SearchIndexingBufferedSenderUploadDocuments.\n */\nexport type SearchIndexingBufferedSenderUploadDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderMergeDocuments.\n */\nexport type SearchIndexingBufferedSenderMergeDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderMergeOrUploadDocuments.\n */\nexport type SearchIndexingBufferedSenderMergeOrUploadDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderDeleteDocuments.\n */\nexport type SearchIndexingBufferedSenderDeleteDocumentsOptions = OperationOptions;\n/**\n * Options for SearchIndexingBufferedSenderFlushDocuments.\n */\nexport type SearchIndexingBufferedSenderFlushDocumentsOptions = OperationOptions;\n\n/**\n * Options for retrieving a single document.\n */\nexport interface GetDocumentOptions<Fields> extends OperationOptions {\n /**\n * List of field names to retrieve for the document; Any field not retrieved will be missing from\n * the returned document.\n */\n selectedFields?: Fields[];\n}\n\n/**\n * Options for the modify index batch operation.\n */\nexport interface IndexDocumentsOptions extends OperationOptions {\n /**\n * If true, will cause this operation to throw if any document operation\n * in the batch did not succeed.\n */\n throwOnAnyFailure?: boolean;\n}\n\n/**\n * Options for the upload documents operation.\n */\nexport type UploadDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Options for the merge documents operation.\n */\nexport type 
MergeDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Options for the merge or upload documents operation.\n */\nexport type MergeOrUploadDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Options for the delete documents operation.\n */\nexport type DeleteDocumentsOptions = IndexDocumentsOptions;\n\n/**\n * Arguments for retrieving the next page of search results.\n */\nexport interface ListSearchResultsPageSettings {\n /**\n * A token used for retrieving the next page of results when the server\n * enforces pagination.\n */\n continuationToken?: string;\n}\n\n/**\n * An iterator for search results of a paticular query. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\nexport type SearchIterator<Fields> = PagedAsyncIterableIterator<\n SearchResult<Fields>,\n SearchDocumentsPageResult<Fields>,\n ListSearchResultsPageSettings\n>;\n\n// BEGIN manually modified generated interfaces\n//\n// This section is for places where we have to manually fix issues\n// with interfaces from the generated code.\n// Mostly this is to allow modeling additionalProperties:true as generics.\n\n/**\n * Parameters for filtering, sorting, faceting, paging, and other search query behaviors.\n */\nexport interface SearchRequest {\n /**\n * A value that specifies whether to fetch the total count of results. Default is false. Setting\n * this value to true may have a performance impact. Note that the count returned is an\n * approximation.\n */\n includeTotalCount?: boolean;\n /**\n * The list of facet expressions to apply to the search query. Each facet expression contains a\n * field name, optionally followed by a comma-separated list of name:value pairs.\n */\n facets?: string[];\n /**\n * The OData $filter expression to apply to the search query.\n */\n filter?: string;\n /**\n * The comma-separated list of field names to use for hit highlights. Only searchable fields can\n * be used for hit highlighting.\n */\n highlightFields?: string;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is\n * &lt;/em&gt;.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default\n * is &lt;em&gt;.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by a\n * search query in order for the query to be reported as a success. This parameter can be useful\n * for ensuring search availability even for services with only one replica. The default is 100.\n */\n minimumCoverage?: number;\n /**\n * The comma-separated list of OData $orderby expressions by which to sort the results. Each\n * expression can be either a field name or a call to either the geo.distance() or the\n * search.score() functions. Each expression can be followed by asc to indicate ascending, or\n * desc to indicate descending. The default is ascending order. Ties will be broken by the match\n * scores of documents. If no $orderby is specified, the default sort order is descending by\n * document match score. There can be at most 32 $orderby clauses.\n */\n orderBy?: string;\n /**\n * A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if\n * your query uses the Lucene query syntax. 
Possible values include: 'Simple', 'Full'\n */\n queryType?: QueryType;\n /**\n * A value that specifies whether we want to calculate scoring statistics (such as document\n * frequency) globally for more consistent scoring, or locally, for lower latency. The default is\n * 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global\n * scoring statistics can increase latency of search queries. Possible values include: 'Local',\n * 'Global'\n */\n scoringStatistics?: ScoringStatistics;\n /**\n * A value to be used to create a sticky session, which can help getting more consistent results.\n * As long as the same sessionId is used, a best-effort attempt will be made to target the same\n * replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the\n * load balancing of the requests across replicas and adversely affect the performance of the\n * search service. The value used as sessionId cannot start with a '_' character.\n */\n sessionId?: string;\n /**\n * The list of parameter values to be used in scoring functions (for example,\n * referencePointParameter) using the format name-values. For example, if the scoring profile\n * defines a function with a parameter called 'mylocation' the parameter string would be\n * \"mylocation--122.2,44.8\" (without the quotes).\n */\n scoringParameters?: string[];\n /**\n * The name of a scoring profile to evaluate match scores for matching documents in order to sort\n * the results.\n */\n scoringProfile?: string;\n /**\n * The name of a semantic configuration that will be used when processing documents for queries of\n * type semantic.\n */\n semanticConfiguration?: string;\n /**\n * A full-text search query expression; Use \"*\" or omit this parameter to match all documents.\n */\n searchText?: string;\n /**\n * The comma-separated list of field names to which to scope the full-text search. When using\n * fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each\n * fielded search expression take precedence over any field names listed in this parameter.\n */\n searchFields?: string;\n /**\n * A value that specifies whether any or all of the search terms must be matched in order to\n * count the document as a match. Possible values include: 'Any', 'All'\n */\n searchMode?: SearchMode;\n /**\n * A value that specifies the language of the search query.\n */\n queryLanguage?: QueryLanguage;\n /**\n * A value that specified the type of the speller to use to spell-correct individual search\n * query terms.\n */\n speller?: QuerySpellerType;\n /**\n * A value that specifies whether answers should be returned as part of the search response.\n */\n answers?: QueryAnswerType;\n /**\n * The comma-separated list of fields to retrieve. If unspecified, all fields marked as\n * retrievable in the schema are included.\n */\n select?: string;\n /**\n * The number of search results to skip. This value cannot be greater than 100,000. If you need\n * to scan documents in sequence, but cannot use skip due to this limitation, consider using\n * orderby on a totally-ordered key and filter with a range query instead.\n */\n skip?: number;\n /**\n * The number of search results to retrieve. This can be used in conjunction with $skip to\n * implement client-side paging of search results. 
If results are truncated due to server-side\n * paging, the response will include a continuation token that can be used to issue another\n * Search request for the next page of results.\n */\n top?: number;\n /**\n * A value that specifies whether captions should be returned as part of the search response.\n */\n captions?: QueryCaptionType;\n /**\n * The comma-separated list of field names used for semantic search.\n */\n semanticFields?: string;\n}\n\n/**\n * Parameters for filtering, sorting, faceting, paging, and other search query behaviors.\n */\nexport interface SearchRequestOptions<Fields> {\n /**\n * A value that specifies whether to fetch the total count of results. Default is false. Setting\n * this value to true may have a performance impact. Note that the count returned is an\n * approximation.\n */\n includeTotalCount?: boolean;\n /**\n * The list of facet expressions to apply to the search query. Each facet expression contains a\n * field name, optionally followed by a comma-separated list of name:value pairs.\n */\n facets?: string[];\n /**\n * The OData $filter expression to apply to the search query.\n */\n filter?: string;\n /**\n * The comma-separated list of field names to use for hit highlights. Only searchable fields can\n * be used for hit highlighting.\n */\n highlightFields?: string;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is\n * &lt;/em&gt;.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default\n * is &lt;em&gt;.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by a\n * search query in order for the query to be reported as a success. This parameter can be useful\n * for ensuring search availability even for services with only one replica. The default is 100.\n */\n minimumCoverage?: number;\n /**\n * The list of OData $orderby expressions by which to sort the results. Each\n * expression can be either a field name or a call to either the geo.distance() or the\n * search.score() functions. Each expression can be followed by asc to indicate ascending, or\n * desc to indicate descending. The default is ascending order. Ties will be broken by the match\n * scores of documents. If no $orderby is specified, the default sort order is descending by\n * document match score. There can be at most 32 $orderby clauses.\n */\n orderBy?: string[];\n /**\n * A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if\n * your query uses the Lucene query syntax. Possible values include: 'simple', 'full'\n */\n queryType?: QueryType;\n /**\n * The list of parameter values to be used in scoring functions (for example,\n * referencePointParameter) using the format name-values. For example, if the scoring profile\n * defines a function with a parameter called 'mylocation' the parameter string would be\n * \"mylocation--122.2,44.8\" (without the quotes).\n */\n scoringParameters?: string[];\n /**\n * The name of a scoring profile to evaluate match scores for matching documents in order to sort\n * the results.\n */\n scoringProfile?: string;\n /**\n * The comma-separated list of field names to which to scope the full-text search. 
When using\n * fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each\n * fielded search expression take precedence over any field names listed in this parameter.\n */\n searchFields?: Fields[];\n /**\n * The language of the query.\n */\n queryLanguage?: QueryLanguage;\n /**\n * Improve search recall by spell-correcting individual search query terms.\n */\n speller?: Speller;\n /**\n * This parameter is only valid if the query type is 'semantic'. If set, the query returns answers\n * extracted from key passages in the highest ranked documents. The number of answers returned can\n * be configured by appending the pipe character '|' followed by the 'count-\\<number of answers\\>' option\n * after the answers parameter value, such as 'extractive|count-3'. Default count is 1.\n */\n answers?: Answers;\n /**\n * A value that specifies whether any or all of the search terms must be matched in order to\n * count the document as a match. Possible values include: 'any', 'all'\n */\n searchMode?: SearchMode;\n /**\n * A value that specifies whether we want to calculate scoring statistics (such as document\n * frequency) globally for more consistent scoring, or locally, for lower latency. Possible\n * values include: 'Local', 'Global'\n */\n scoringStatistics?: ScoringStatistics;\n /**\n * A value to be used to create a sticky session, which can help to get more consistent results.\n * As long as the same sessionId is used, a best-effort attempt will be made to target the same\n * replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the\n * load balancing of the requests across replicas and adversely affect the performance of the\n * search service. The value used as sessionId cannot start with a '_' character.\n */\n sessionId?: string;\n /**\n * The list of fields to retrieve. If unspecified, all fields marked as\n * retrievable in the schema are included.\n */\n select?: Fields[];\n /**\n * The number of search results to skip. This value cannot be greater than 100,000. If you need\n * to scan documents in sequence, but cannot use skip due to this limitation, consider using\n * orderby on a totally-ordered key and filter with a range query instead.\n */\n skip?: number;\n /**\n * The number of search results to retrieve. This can be used in conjunction with $skip to\n * implement client-side paging of search results. If results are truncated due to server-side\n * paging, the response will include a continuation token that can be used to issue another\n * Search request for the next page of results.\n */\n top?: number;\n /**\n * This parameter is only valid if the query type is 'semantic'. If set, the query returns captions\n * extracted from key passages in the highest ranked documents. When Captions is set to 'extractive',\n * highlighting is enabled by default, and can be configured by appending the pipe character '|'\n * followed by the 'highlight-true'/'highlight-false' option, such as 'extractive|highlight-true'. Defaults to 'None'.\n */\n captions?: Captions;\n /**\n * The list of field names used for semantic search.\n */\n semanticFields?: string[];\n}\n\n/**\n * Contains a document found by a search query, plus associated metadata.\n */\nexport type SearchResult<T> = {\n /**\n * The relevance score of the document compared to other documents returned by the query.\n * **NOTE: This property will not be serialized. 
It can only be populated by the server.**\n */\n readonly score: number;\n /**\n * The relevance score computed by the semantic ranker for the top search results. Search results are sorted by the RerankerScore first and then by the Score. RerankerScore is only returned for queries of type 'semantic'.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly rerankerScore?: number;\n /**\n * Text fragments from the document that indicate the matching search terms, organized by each\n * applicable field; null if hit highlighting was not enabled for the query.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly highlights?: { [k in keyof T]?: string[] };\n /**\n * Captions are the most representative passages from the document relative to the search query. They are often used as a document summary. Captions are only returned for queries of type 'semantic'.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly captions?: CaptionResult[];\n\n document: T;\n};\n\n/**\n * Response containing search results from an index.\n */\nexport interface SearchDocumentsResultBase {\n /**\n * The total count of results found by the search operation, or null if the count was not\n * requested. If present, the count may be greater than the number of results in this response.\n * This can happen if you use the $top or $skip parameters, or if Azure Cognitive Search can't\n * return all the requested documents in a single Search response.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly count?: number;\n /**\n * A value indicating the percentage of the index that was included in the query, or null if\n * minimumCoverage was not specified in the request.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly coverage?: number;\n /**\n * The facet query results for the search operation, organized as a collection of buckets for\n * each faceted field; null if the query did not include any facet expressions.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly facets?: { [propertyName: string]: FacetResult[] };\n /**\n * The answers query results for the search operation; null if the answers query parameter was\n * not specified or set to 'none'.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly answers?: AnswerResult[];\n}\n\n/**\n * Response containing search results from an index.\n */\nexport interface SearchDocumentsResult<T> extends SearchDocumentsResultBase {\n /**\n * The sequence of results returned by the query.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly results: SearchIterator<T>;\n}\n\n/**\n * Response containing search page results from an index.\n */\nexport interface SearchDocumentsPageResult<T> extends SearchDocumentsResultBase {\n /**\n * The sequence of results returned by the query.\n * **NOTE: This property will not be serialized. 
It can only be populated by the server.**\n */\n readonly results: SearchResult<T>[];\n /**\n * A token used for retrieving the next page of results when the server\n * enforces pagination.\n */\n continuationToken?: string;\n}\n\n/**\n * Parameters for filtering, sorting, fuzzy matching, and other suggestions query behaviors.\n */\nexport interface SuggestRequest<Fields> {\n /**\n * An OData expression that filters the documents considered for suggestions.\n */\n filter?: string;\n /**\n * A value indicating whether to use fuzzy matching for the suggestion query. Default is false.\n * When set to true, the query will find suggestions even if there's a substituted or missing\n * character in the search text. While this provides a better experience in some scenarios, it\n * comes at a performance cost as fuzzy suggestion searches are slower and consume more\n * resources.\n */\n useFuzzyMatching?: boolean;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted,\n * hit highlighting of suggestions is disabled.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If\n * omitted, hit highlighting of suggestions is disabled.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by a\n * suggestion query in order for the query to be reported as a success. This parameter can be\n * useful for ensuring search availability even for services with only one replica. The default\n * is 80.\n */\n minimumCoverage?: number;\n /**\n * The list of OData $orderby expressions by which to sort the results. Each\n * expression can be either a field name or a call to either the geo.distance() or the\n * search.score() functions. Each expression can be followed by asc to indicate ascending, or\n * desc to indicate descending. The default is ascending order. Ties will be broken by the match\n * scores of documents. If no $orderby is specified, the default sort order is descending by\n * document match score. There can be at most 32 $orderby clauses.\n */\n orderBy?: string[];\n /**\n * The comma-separated list of field names to search for the specified search text. Target fields\n * must be included in the specified suggester.\n */\n searchFields?: Fields[];\n /**\n * The list of fields to retrieve. If unspecified, only the key field will be\n * included in the results.\n */\n select?: Fields[];\n /**\n * The number of suggestions to retrieve. This must be a value between 1 and 100. The default is\n * 5.\n */\n top?: number;\n}\n\n/**\n * A result containing a document found by a suggestion query, plus associated metadata.\n */\nexport type SuggestResult<T> = {\n /**\n * The text of the suggestion result.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly text: string;\n document: T;\n};\n\n/**\n * Response containing suggestion query results from an index.\n */\nexport interface SuggestDocumentsResult<T> {\n /**\n * The sequence of results returned by the query.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly results: SuggestResult<T>[];\n /**\n * A value indicating the percentage of the index that was included in the query, or null if\n * minimumCoverage was not set in the request.\n * **NOTE: This property will not be serialized. 
It can only be populated by the server.**\n */\n readonly coverage?: number;\n}\n\n/**\n * Parameters for fuzzy matching, and other autocomplete query behaviors.\n */\nexport interface AutocompleteRequest<Fields> {\n /**\n * Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles\n * and 'oneTermWithContext' to use the current context while producing auto-completed terms.\n * Possible values include: 'oneTerm', 'twoTerms', 'oneTermWithContext'\n */\n autocompleteMode?: AutocompleteMode;\n /**\n * An OData expression that filters the documents used to produce completed terms for the\n * Autocomplete result.\n */\n filter?: string;\n /**\n * A value indicating whether to use fuzzy matching for the autocomplete query. Default is false.\n * When set to true, the query will autocomplete terms even if there's a substituted or missing\n * character in the search text. While this provides a better experience in some scenarios, it\n * comes at a performance cost as fuzzy autocomplete queries are slower and consume more\n * resources.\n */\n useFuzzyMatching?: boolean;\n /**\n * A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted,\n * hit highlighting is disabled.\n */\n highlightPostTag?: string;\n /**\n * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If\n * omitted, hit highlighting is disabled.\n */\n highlightPreTag?: string;\n /**\n * A number between 0 and 100 indicating the percentage of the index that must be covered by an\n * autocomplete query in order for the query to be reported as a success. This parameter can be\n * useful for ensuring search availability even for services with only one replica. The default\n * is 80.\n */\n minimumCoverage?: number;\n /**\n * The comma-separated list of field names to consider when querying for auto-completed terms.\n * Target fields must be included in the specified suggester.\n */\n searchFields?: Fields[];\n /**\n * The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The\n * default is 5.\n */\n top?: number;\n}\n\n/**\n * Represents an index action that operates on a document.\n */\nexport type IndexDocumentsAction<T> = {\n /**\n * The operation to perform on a document in an indexing batch. Possible values include:\n * 'upload', 'merge', 'mergeOrUpload', 'delete'\n */\n __actionType: IndexActionType;\n} & Partial<T>;\n\n// END manually modified generated interfaces\n"]}
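The indexModels source above defines the whole query surface: SearchRequestOptions drives the request, SearchDocumentsResult wraps the response, and SearchIterator pages through hits lazily. A minimal sketch of how those pieces fit together at call time, assuming a hypothetical "hotels" index, a hypothetical Hotel document type, and placeholder endpoint and key values:

import { AzureKeyCredential, SearchClient } from "@azure/search-documents";

// Hypothetical document shape; any index with compatible fields would do.
interface Hotel {
  hotelId: string;
  hotelName: string;
  category: string;
  rating: number;
}

const client = new SearchClient<Hotel>(
  "https://<service-name>.search.windows.net", // placeholder endpoint
  "hotels", // placeholder index name
  new AzureKeyCredential("<query-key>") // placeholder credential
);

async function runQuery(): Promise<void> {
  // The options bag combines OperationOptions with SearchRequestOptions<Fields>.
  const result = await client.search("wifi", {
    includeTotalCount: true,
    filter: "rating ge 4",
    facets: ["category"],
    orderBy: ["rating desc"],
    select: ["hotelId", "hotelName", "rating"],
    top: 10
  });

  console.log(`approximate total: ${result.count}`);

  // result.results is a SearchIterator<Fields>: iterating it issues further
  // requests on demand as the service pages the response.
  for await (const hit of result.results) {
    console.log(hit.score, hit.document.hotelName);
  }
}

Calling result.results.byPage() instead yields SearchDocumentsPageResult pages, whose continuationToken field lines up with the ListSearchResultsPageSettings interface above.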
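SuggestRequest and AutocompleteRequest map onto the suggest and autocomplete methods of the same client. A sketch of both, reusing the hypothetical client above and assuming the index defines a suggester named "sg" (suggesters are part of the index definition, not shown here):

async function typeAhead(partial: string): Promise<void> {
  const suggestions = await client.suggest(partial, "sg", {
    useFuzzyMatching: true, // tolerate a substituted or missing character
    select: ["hotelId", "hotelName"],
    top: 5
  });
  for (const s of suggestions.results) {
    // s.text is the suggestion text; s.document carries the selected fields.
    console.log(s.text, s.document.hotelName);
  }

  const completions = await client.autocomplete(partial, "sg", {
    autocompleteMode: "twoTerms",
    top: 5
  });
  for (const c of completions.results) {
    console.log(c.text);
  }
}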
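IndexDocumentsAction is the unit of an indexing batch: its __actionType discriminator selects upload, merge, mergeOrUpload, or delete, and the rest of the object is a Partial of the document. Callers normally do not build actions by hand; IndexDocumentsBatch appends them through helper methods. A sketch against the same hypothetical Hotel client (the delete-by-key-name overload is my reading of the v11 surface, not something this diff shows):

import { IndexDocumentsBatch } from "@azure/search-documents";

async function mutateIndex(): Promise<void> {
  const batch = new IndexDocumentsBatch<Hotel>();
  // Each helper appends IndexDocumentsAction<Hotel> entries with the matching
  // __actionType value ('upload' | 'mergeOrUpload' | 'delete').
  batch.upload([
    { hotelId: "1", hotelName: "Fancy Stay", category: "Luxury", rating: 5 }
  ]);
  batch.mergeOrUpload([
    { hotelId: "2", hotelName: "Roach Motel", category: "Budget", rating: 2 }
  ]);
  batch.delete("hotelId", ["3"]); // delete by key values

  const { results } = await client.indexDocuments(batch);
  for (const r of results) {
    console.log(r.key, r.succeeded);
  }
}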
@@ -1 +1 @@
1
- {"version":3,"file":"serviceModels.js","sourceRoot":"","sources":["../../src/serviceModels.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAu2BlC,MAAM,UAAU,cAAc,CAAC,KAAkB;IAC/C,OAAO,KAAK,CAAC,IAAI,KAAK,iBAAiB,IAAI,KAAK,CAAC,IAAI,KAAK,6BAA6B,CAAC;AAC1F,CAAC;AA0TD;;;GAGG;AACH,MAAM,CAAN,IAAY,mBAmEX;AAnED,WAAY,mBAAmB;IAC7B;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;;OAGG;IACH,6CAAsB,CAAA;IACtB;;;OAGG;IACH,wCAAiB,CAAA;IACjB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;OAEG;IACH,wDAAwD;IACxD,kFAA2D,CAAA;IAC3D;;OAEG;IACH,wDAAwD;IACxD,mGAA4E,CAAA;IAC5E;;;OAGG;IACH,sCAAe,CAAA;IACf;;;OAGG;IACH,0DAAmC,CAAA;IACnC;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;;OAIG;IACH,+CAAwB,CAAA;IACxB;;;OAGG;IACH,oDAA6B,CAAA;IAC7B;;;OAGG;IACH,gDAAyB,CAAA;AAC3B,CAAC,EAnEW,mBAAmB,KAAnB,mBAAmB,QAmE9B;AAED;;;GAGG;AACH,MAAM,CAAN,IAAY,qBAiLX;AAjLD,WAAY,qBAAqB;IAC/B;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,kDAAyB,CAAA;IACzB;;;;;OAKG;IACH,sDAA6B,CAAA;IAC7B;;;OAGG;IACH,iDAAwB,CAAA;IACxB;;;;OAIG;IACH,+CAAsB,CAAA;IACtB;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,oDAA2B,CAAA;IAC3B;;;;OAIG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,yDAAgC,CAAA;IAChC;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,2CAAkB,CAAA;IAClB;;;OAGG;IACH,uEAA8C,CAAA;IAC9C;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,iFAAwD,CAAA;IACxD;;;;OAIG;IACH,kFAAyD,CAAA;IACzD;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,sCAAa,CAAA;IACb;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;OAEG;IACH,yDAAgC,CAAA;AAClC,CAAC,EAjLW,qBAAqB,KAArB,qBAAqB,QAiLhC;AAED;;;GAGG;AACH,MAAM,CAAN,IAAY,oBAMX;AAND,WAAY,oBAAoB;IAC9B;;;OAGG;IACH,gDAAwB,CAAA;AAC1B,CAAC,EANW,oBAAoB,KAApB,oBAAoB,QAM/B;AAED;;;;GAIG;AACH,MAAM,CAAN,IAAY,kBAoXX;AApXD,WAAY,kBAAkB;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4
CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG,CAAC,kDAA4B,CAAA;IAChC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,mEAA6C,CAAA;IAC7C;;OAEG;IACH,6DAAuC,CAAA;IACvC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,wDAAkC,CAAA;IAClC;;OAEG;IACH,gFAA0D,CAAA;IAC1D;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,uCAAiB,CAAA;IACjB;;OAEG;IACH,mCAAa,CAAA;IACb;;OAEG;IACH,+CAAyB,CAAA;AAC3B,CAAC,EApXW,kBAAkB,KAAlB,kBAAkB,QAoX7B;AA6ED,6CAA6C","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport { OperationOptions } from \"@azure/core-client\";\nimport {\n LuceneStandardAnalyzer,\n StopAnalyzer,\n CorsOptions,\n Suggester as SearchSuggester,\n ClassicTokenizer,\n EdgeNGramTokenizer,\n MicrosoftLanguageTokenizer,\n MicrosoftLanguageStemmingTokenizer,\n NGramTokenizer,\n PathHierarchyTokenizerV2 as PathHierarchyTokenizer,\n UaxUrlEmailTokenizer,\n AsciiFoldingTokenFilter,\n CjkBigramTokenFilter,\n CommonGramTokenFilter,\n DictionaryDecompounderTokenFilter,\n LengthTokenFilter,\n ElisionTokenFilter,\n KeepTokenFilter,\n KeywordMarkerTokenFilter,\n LimitTokenFilter,\n PatternCaptureTokenFilter,\n PatternReplaceTokenFilter,\n PhoneticTokenFilter,\n ShingleTokenFilter,\n SnowballTokenFilter,\n StemmerTokenFilter,\n StemmerOverrideTokenFilter,\n StopwordsTokenFilter,\n SynonymTokenFilter,\n TruncateTokenFilter,\n UniqueTokenFilter,\n WordDelimiterTokenFilter,\n MappingCharFilter,\n PatternReplaceCharFilter,\n DistanceScoringFunction,\n FreshnessScoringFunction,\n MagnitudeScoringFunction,\n TagScoringFunction,\n TextWeights,\n ScoringFunctionAggregation,\n RegexFlags,\n ConditionalSkill,\n KeyPhraseExtractionSkill,\n OcrSkill,\n ImageAnalysisSkill,\n LanguageDetectionSkill,\n ShaperSkill,\n MergeSkill,\n EntityRecognitionSkill,\n SentimentSkill,\n SplitSkill,\n PIIDetectionSkill,\n EntityRecognitionSkillV3,\n EntityLinkingSkill,\n SentimentSkillV3,\n CustomEntityLookupSkill,\n DocumentExtractionSkill,\n TextTranslationSkill,\n WebApiSkill,\n DefaultCognitiveServicesAccount,\n CognitiveServicesAccountKey,\n HighWaterMarkChangeDetectionPolicy,\n SqlIntegratedChangeTrackingPolicy,\n SearchIndexerDataUserAssignedIdentity,\n SearchIndexerDataNoneIdentity,\n SoftDeleteColumnDeletionDetectionPolicy,\n SearchIndexerDataSourceType,\n SearchIndexerDataContainer,\n LexicalAnalyzerName,\n ClassicSimilarity,\n BM25Similarity,\n EdgeNGramTokenFilterSide,\n ServiceCounters,\n ServiceLimits,\n FieldMapping,\n IndexingParameters,\n IndexingSchedule,\n LexicalNormalizerName,\n CustomNormalizer,\n SearchIndexerKnowledgeStore,\n SearchIndexerCache\n} from \"./generated/service/models\";\n\nimport { PagedAsyncIterableIterator } from 
\"@azure/core-paging\";\n\n/**\n * Options for a list skillsets operation.\n */\nexport type ListSkillsetsOptions = OperationOptions;\n\n/**\n * Options for a list synonymMaps operation.\n */\nexport type ListSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for a list indexes operation.\n */\nexport type ListIndexesOptions = OperationOptions;\n\n/**\n * Options for a list indexers operation.\n */\nexport type ListIndexersOptions = OperationOptions;\n\n/**\n * Options for a list data sources operation.\n */\nexport type ListDataSourceConnectionsOptions = OperationOptions;\n\n/**\n * Options for get index operation.\n */\nexport type GetIndexOptions = OperationOptions;\n\n/**\n * Options for get skillset operation.\n */\nexport type GetSkillSetOptions = OperationOptions;\n\n/**\n * Options for get synonymmaps operation.\n */\nexport type GetSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for get indexer operation.\n */\nexport type GetIndexerOptions = OperationOptions;\n\n/**\n * Options for get datasource operation.\n */\nexport type GetDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for get index statistics operation.\n */\nexport type GetIndexStatisticsOptions = OperationOptions;\n\n/**\n * Statistics for a given index. Statistics are collected periodically and are not guaranteed to\n * always be up-to-date.\n */\nexport interface SearchIndexStatistics {\n /**\n * The number of documents in the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly storageSize: number;\n}\n\n/**\n * Response from a get service statistics request. If successful, it includes service level\n * counters and limits.\n */\nexport interface SearchServiceStatistics {\n /**\n * Service level resource counters.\n */\n counters: ServiceCounters;\n /**\n * Service level general limits.\n */\n limits: ServiceLimits;\n}\n\n/**\n * Options for get service statistics operation.\n */\nexport type GetServiceStatisticsOptions = OperationOptions;\n\n/**\n * Options for get indexer status operation.\n */\nexport type GetIndexerStatusOptions = OperationOptions;\n\n/**\n * Options for reset indexer operation.\n */\nexport type ResetIndexerOptions = OperationOptions;\n\n/**\n * Options for run indexer operation.\n */\nexport type RunIndexerOptions = OperationOptions;\n\n/**\n * Options for create index operation.\n */\nexport type CreateIndexOptions = OperationOptions;\n\n/**\n * Options for create skillset operation.\n */\nexport type CreateSkillsetOptions = OperationOptions;\n\n/**\n * Options for create synonymmap operation.\n */\nexport type CreateSynonymMapOptions = OperationOptions;\n\n/**\n * Options for create indexer operation.\n */\nexport type CreateIndexerOptions = OperationOptions;\n\n/**\n * Options for create datasource operation.\n */\nexport type CreateDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for create/update index operation.\n */\nexport interface CreateOrUpdateIndexOptions extends OperationOptions {\n /**\n * Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by\n * taking the index offline for at least a few seconds. This temporarily causes indexing and\n * query requests to fail. 
Performance and write availability of the index can be impaired for\n * several minutes after the index is updated, or longer for very large indexes.\n */\n allowIndexDowntime?: boolean;\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for reset docs operation.\n */\nexport interface ResetDocumentsOptions extends OperationOptions {\n /** document keys to be reset */\n documentKeys?: string[];\n /** datasource document identifiers to be reset */\n datasourceDocumentIds?: string[];\n /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */\n overwrite?: boolean;\n}\n\n/**\n * Options for reset skills operation.\n */\nexport interface ResetSkillsOptions extends OperationOptions {\n /** the names of skills to be reset. */\n skillNames?: string[];\n}\n\n/**\n * Options for create/update skillset operation.\n */\nexport interface CreateOrUpdateSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /**\n * Ignores cache reset requirements.\n */\n skipIndexerResetRequirementForCache?: boolean;\n /**\n * Disables cache reprocessing change detection.\n */\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/**\n * Options for create/update synonymmap operation.\n */\nexport interface CreateOrUpdateSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update indexer operation.\n */\nexport interface CreateorUpdateIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /** Ignores cache reset requirements. */\n skipIndexerResetRequirementForCache?: boolean;\n /** Disables cache reprocessing change detection. 
*/\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/**\n * Options for create/update datasource operation.\n */\nexport interface CreateorUpdateDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /**\n * Ignores cache reset requirements.\n */\n skipIndexerResetRequirementForCache?: boolean;\n}\n\n/**\n * Options for delete index operation.\n */\nexport interface DeleteIndexOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete skillset operation.\n */\nexport interface DeleteSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete synonymmap operation.\n */\nexport interface DeleteSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete indexer operation.\n */\nexport interface DeleteIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete datasource operation.\n */\nexport interface DeleteDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Specifies some text and analysis components used to break that text into tokens.\n */\nexport interface AnalyzeRequest {\n /**\n * The text to break into tokens.\n */\n text: string;\n /**\n * The name of the analyzer to use to break the given text. If this parameter is not specified,\n * you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. KnownAnalyzerNames is an enum containing known values.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n analyzerName?: string;\n /**\n * The name of the tokenizer to use to break the given text. If this parameter is not specified,\n * you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. KnownTokenizerNames is an enum containing known values.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n tokenizerName?: string;\n /**\n * The name of the normalizer to use to normalize the given text.\n */\n normalizerName?: LexicalNormalizerName;\n /**\n * An optional list of token filters to use when breaking the given text. This parameter can only\n * be set when using the tokenizer parameter.\n */\n tokenFilters?: string[];\n /**\n * An optional list of character filters to use when breaking the given text. This parameter can\n * only be set when using the tokenizer parameter.\n */\n charFilters?: string[];\n}\n\n/**\n * Options for analyze text operation.\n */\nexport type AnalyzeTextOptions = OperationOptions & AnalyzeRequest;\n\n// BEGIN manually modified generated interfaces\n//\n// This section is for places where we have to manually fix issues\n// with interfaces from the generated code.\n// One issue is that unions of discriminated types are generated with\n// their abstract base class as a member.\n\n/**\n * Flexibly separates text into terms via a regular expression pattern. 
This analyzer is\n * implemented using Apache Lucene.\n */\nexport interface PatternAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * A value indicating whether terms should be lower-cased. Default is true. Default value: true.\n */\n lowerCaseTerms?: boolean;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * A list of stopwords.\n */\n stopwords?: string[];\n}\n\n/**\n * Allows you to take control over the process of converting text into indexable/searchable tokens.\n * It's a user-defined configuration consisting of a single predefined tokenizer and one or more\n * filters. The tokenizer is responsible for breaking text into tokens, and the filters for\n * modifying tokens emitted by the tokenizer.\n */\nexport interface CustomAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.CustomAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as\n * breaking a sentence into words. KnownTokenizerNames is an enum containing known values.\n */\n tokenizerName: string;\n /**\n * A list of token filters used to filter out or modify the tokens generated by a tokenizer. For\n * example, you can specify a lowercase filter that converts all characters to lowercase. The\n * filters are run in the order in which they are listed.\n */\n tokenFilters?: string[];\n /**\n * A list of character filters used to prepare input text before it is processed by the\n * tokenizer. For instance, they can replace certain characters or symbols. The filters are run\n * in the order in which they are listed.\n */\n charFilters?: string[];\n}\n\n/**\n * Contains the possible cases for Analyzer.\n */\nexport type LexicalAnalyzer =\n | CustomAnalyzer\n | PatternAnalyzer\n | LuceneStandardAnalyzer\n | StopAnalyzer;\n\n/**\n * Contains the possible cases for Skill.\n */\nexport type SearchIndexerSkill =\n | ConditionalSkill\n | KeyPhraseExtractionSkill\n | OcrSkill\n | ImageAnalysisSkill\n | LanguageDetectionSkill\n | ShaperSkill\n | MergeSkill\n | EntityRecognitionSkill\n | SentimentSkill\n | SplitSkill\n | PIIDetectionSkill\n | EntityRecognitionSkillV3\n | EntityLinkingSkill\n | SentimentSkillV3\n | CustomEntityLookupSkill\n | TextTranslationSkill\n | DocumentExtractionSkill\n | WebApiSkill;\n\n/**\n * Contains the possible cases for CognitiveServicesAccount.\n */\nexport type CognitiveServicesAccount =\n | DefaultCognitiveServicesAccount\n | CognitiveServicesAccountKey;\n/**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. 
This tokenizer is\n * implemented using Apache Lucene.\n */\nexport interface PatternTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * The zero-based ordinal of the matching group in the regular expression pattern to extract into\n * tokens. Use -1 if you want to use the entire pattern to split the input into tokens,\n * irrespective of matching groups. Default is -1. Default value: -1.\n */\n group?: number;\n}\n/**\n * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using\n * Apache Lucene.\n */\nexport interface LuceneStandardTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.StandardTokenizerV2\"\n | \"#Microsoft.Azure.Search.StandardTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. Default value: 255.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * This token filter is implemented using Apache Lucene.\n */\nexport interface EdgeNGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n /**\n * Specifies which side of the input the n-gram should be generated from. Default is \"front\".\n * Possible values include: 'Front', 'Back'\n */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/**\n * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.\n */\nexport interface KeywordTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.KeywordTokenizerV2\"\n | \"#Microsoft.Azure.Search.KeywordTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 256. Tokens longer than the maximum length are split. 
The\n * maximum token length that can be used is 300 characters. Default value: 256.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Contains the possible cases for Tokenizer.\n */\nexport type LexicalTokenizer =\n | ClassicTokenizer\n | EdgeNGramTokenizer\n | KeywordTokenizer\n | MicrosoftLanguageTokenizer\n | MicrosoftLanguageStemmingTokenizer\n | NGramTokenizer\n | PathHierarchyTokenizer\n | PatternTokenizer\n | LuceneStandardTokenizer\n | UaxUrlEmailTokenizer;\n\n/**\n * Contains the possible cases for Similarity.\n */\nexport type SimilarityAlgorithm = ClassicSimilarity | BM25Similarity;\n\n/**\n * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.\n */\nexport interface NGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.NGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.NGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n}\n\n/**\n * Contains the possible cases for TokenFilter.\n */\nexport type TokenFilter =\n | AsciiFoldingTokenFilter\n | CjkBigramTokenFilter\n | CommonGramTokenFilter\n | DictionaryDecompounderTokenFilter\n | EdgeNGramTokenFilter\n | ElisionTokenFilter\n | KeepTokenFilter\n | KeywordMarkerTokenFilter\n | LengthTokenFilter\n | LimitTokenFilter\n | NGramTokenFilter\n | PatternCaptureTokenFilter\n | PatternReplaceTokenFilter\n | PhoneticTokenFilter\n | ShingleTokenFilter\n | SnowballTokenFilter\n | StemmerTokenFilter\n | StemmerOverrideTokenFilter\n | StopwordsTokenFilter\n | SynonymTokenFilter\n | TruncateTokenFilter\n | UniqueTokenFilter\n | WordDelimiterTokenFilter;\n\n/**\n * Contains the possible cases for CharFilter.\n */\nexport type CharFilter = MappingCharFilter | PatternReplaceCharFilter;\n\n/**\n * Contains the possible cases for LexicalNormalizer.\n */\nexport type LexicalNormalizer = CustomNormalizer;\n\n/**\n * Contains the possible cases for ScoringFunction.\n */\nexport type ScoringFunction =\n | DistanceScoringFunction\n | FreshnessScoringFunction\n | MagnitudeScoringFunction\n | TagScoringFunction;\n\n/**\n * Defines values for SearchFieldDataType.\n * Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean',\n * 'Edm.DateTimeOffset', 'Edm.GeographyPoint', 'Collection(Edm.String)',\n * 'Collection(Edm.Int32)', 'Collection(Edm.Int64)', 'Collection(Edm.Double)',\n * 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)'\n * @readonly\n */\nexport type SearchFieldDataType =\n | \"Edm.String\"\n | \"Edm.Int32\"\n | \"Edm.Int64\"\n | \"Edm.Double\"\n | \"Edm.Boolean\"\n | \"Edm.DateTimeOffset\"\n | \"Edm.GeographyPoint\"\n | \"Collection(Edm.String)\"\n | \"Collection(Edm.Int32)\"\n | \"Collection(Edm.Int64)\"\n | \"Collection(Edm.Double)\"\n | \"Collection(Edm.Boolean)\"\n | \"Collection(Edm.DateTimeOffset)\"\n | \"Collection(Edm.GeographyPoint)\";\n\n/**\n * Defines values for ComplexDataType.\n * Possible values include: 'Edm.ComplexType', 'Collection(Edm.ComplexType)'\n * @readonly\n */\nexport type ComplexDataType = \"Edm.ComplexType\" 
| \"Collection(Edm.ComplexType)\";\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport type SearchField = SimpleField | ComplexField;\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface SimpleField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field. Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64',\n * 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', 'Edm.GeographyPoint'\n * 'Collection(Edm.String)', 'Collection(Edm.Int32)', 'Collection(Edm.Int64)',\n * 'Collection(Edm.Double)', 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)',\n * 'Collection(Edm.GeographyPoint)'\n */\n type: SearchFieldDataType;\n /**\n * A value indicating whether the field uniquely identifies documents in the index. Exactly one\n * top-level field in each index must be chosen as the key field and it must be of type\n * Edm.String. Key fields can be used to look up documents directly and update or delete specific\n * documents. Default is false.\n */\n key?: boolean;\n /**\n * A value indicating whether the field can be returned in a search result. You can enable this\n * option if you want to use a field (for example, margin) as a filter, sorting, or scoring\n * mechanism but do not want the field to be visible to the end user. This property must be false\n * for key fields. This property can be changed on existing fields.\n * Disabling this property does not cause any increase in index storage requirements.\n * Default is false.\n */\n hidden?: boolean;\n /**\n * A value indicating whether the field is full-text searchable. This means it will undergo\n * analysis such as word-breaking during indexing. If you set a searchable field to a value like\n * \"sunny day\", internally it will be split into the individual tokens \"sunny\" and \"day\". This\n * enables full-text searches for these terms. This property must be false for simple\n * fields of other non-string data types.\n * Note: searchable fields consume extra space in your index since Azure Cognitive Search will store an\n * additional tokenized version of the field value for full-text searches.\n * Defaults to false for simple fields.\n */\n searchable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $filter queries. Filterable\n * differs from searchable in how strings are handled. Fields of type Edm.String or\n * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are\n * for exact matches only. For example, if you set such a field f to \"sunny day\", $filter=f eq\n * 'sunny' will find no matches, but $filter=f eq 'sunny day' will.\n * Default is false.\n */\n filterable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $orderby expressions. By\n * default Azure Cognitive Search sorts results by score, but in many experiences users will want\n * to sort by fields in the documents. A simple field can be sortable only if it is single-valued\n * (it has a single value in the scope of the parent document). Simple collection fields cannot\n * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also\n * multi-valued, and therefore cannot be sortable. 
This is true whether it's an immediate parent\n * field, or an ancestor field, that's the complex collection. The default for sortable is false.\n */\n sortable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in facet queries. Typically\n * used in a presentation of search results that includes hit count by category (for example,\n * search for digital cameras and see hits by brand, by megapixels, by price, and so on).\n * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable.\n * Default is false for all other simple fields.\n */\n facetable?: boolean;\n /**\n * The name of the language analyzer to use for the field. This option can be used only with\n * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.\n * Once the analyzer is chosen, it cannot be changed for the field.\n * KnownAnalyzerNames is an enum containing known values.\n */\n analyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at search time for the field. This option can be used only with\n * searchable fields. It must be set together with indexAnalyzer and it cannot be set together\n * with the analyzer option. This analyzer can be updated on an existing field.\n * KnownAnalyzerNames is an enum containing known values.\n */\n searchAnalyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at indexing time for the field. This option can be used only\n * with searchable fields. It must be set together with searchAnalyzer and it cannot be set\n * together with the analyzer option. Once the analyzer is chosen, it cannot be changed for the\n * field. KnownAnalyzerNames is an enum containing known values.\n */\n indexAnalyzerName?: LexicalAnalyzerName;\n /**\n * A list of the names of synonym maps to associate with this field. This option can be used only\n * with searchable fields. Currently only one synonym map per field is supported. Assigning a\n * synonym map to a field ensures that query terms targeting that field are expanded at\n * query-time using the rules in the synonym map. This attribute can be changed on existing\n * fields.\n */\n synonymMapNames?: string[];\n /**\n * The name of the normalizer used at indexing time for the field.\n */\n normalizerName?: LexicalNormalizerName;\n}\n\nexport function isComplexField(field: SearchField): field is ComplexField {\n return field.type === \"Edm.ComplexType\" || field.type === \"Collection(Edm.ComplexType)\";\n}\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface ComplexField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field.\n * Possible values include: 'Edm.ComplexType','Collection(Edm.ComplexType)'\n */\n type: ComplexDataType;\n /**\n * A list of sub-fields.\n */\n fields: SearchField[];\n}\n\n/**\n * Represents a synonym map definition.\n */\nexport interface SynonymMap {\n /**\n * The name of the synonym map.\n */\n name: string;\n /**\n * An array of synonym rules in the specified synonym map format.\n */\n synonyms: string[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. 
This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The ETag of the synonym map.\n */\n etag?: string;\n}\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\n// eslint-disable-next-line @typescript-eslint/ban-types\nexport type IndexIterator = PagedAsyncIterableIterator<SearchIndex, SearchIndex[], {}>;\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\n// eslint-disable-next-line @typescript-eslint/ban-types\nexport type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;\n\n/**\n * Represents a search index definition, which describes the fields and search behavior of an\n * index.\n */\nexport interface SearchIndex {\n /**\n * The name of the index.\n */\n name: string;\n /**\n * The fields of the index.\n */\n fields: SearchField[];\n /**\n * The scoring profiles for the index.\n */\n scoringProfiles?: ScoringProfile[];\n /**\n * The name of the scoring profile to use if none is specified in the query. If this property is\n * not set and no scoring profile is specified in the query, then default scoring (tf-idf) will\n * be used.\n */\n defaultScoringProfile?: string;\n /**\n * Options to control Cross-Origin Resource Sharing (CORS) for the index.\n */\n corsOptions?: CorsOptions;\n /**\n * The suggesters for the index.\n */\n suggesters?: SearchSuggester[];\n /**\n * The analyzers for the index.\n */\n analyzers?: LexicalAnalyzer[];\n /**\n * The tokenizers for the index.\n */\n tokenizers?: LexicalTokenizer[];\n /**\n * The token filters for the index.\n */\n tokenFilters?: TokenFilter[];\n /**\n * The character filters for the index.\n */\n charFilters?: CharFilter[];\n /**\n * The normalizers for the index.\n */\n normalizers?: LexicalNormalizer[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The type of similarity algorithm to be used when scoring and ranking the documents matching a\n * search query. 
The similarity algorithm can only be defined at index creation time and cannot\n * be modified on existing indexes. If null, the ClassicSimilarity algorithm is used.\n */\n similarity?: SimilarityAlgorithm;\n /**\n * The ETag of the index.\n */\n etag?: string;\n}\n\n/**\n * Represents an indexer.\n */\nexport interface SearchIndexer {\n /**\n * The name of the indexer.\n */\n name: string;\n /**\n * The description of the indexer.\n */\n description?: string;\n /**\n * The name of the datasource from which this indexer reads data.\n */\n dataSourceName: string;\n /**\n * The name of the skillset executing with this indexer.\n */\n skillsetName?: string;\n /**\n * The name of the index to which this indexer writes data.\n */\n targetIndexName: string;\n /**\n * The schedule for this indexer.\n */\n schedule?: IndexingSchedule;\n /**\n * Parameters for indexer execution.\n */\n parameters?: IndexingParameters;\n /**\n * Defines mappings between fields in the data source and corresponding target fields in the\n * index.\n */\n fieldMappings?: FieldMapping[];\n /**\n * Output field mappings are applied after enrichment and immediately before indexing.\n */\n outputFieldMappings?: FieldMapping[];\n /**\n * A value indicating whether the indexer is disabled. Default is false. Default value: false.\n */\n isDisabled?: boolean;\n /**\n * The ETag of the indexer.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your indexer definition (as well as\n * indexer execution status) when you want full assurance that no one, not even Microsoft, can\n * decrypt them in Azure Cognitive Search. Once you have encrypted your indexer definition, it\n * will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property\n * to null. You can change this property as needed if you want to rotate your encryption key;\n * Your indexer definition (and indexer execution status) will be unaffected. Encryption with\n * customer-managed keys is not available for free search services, and is only available for\n * paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * Adds caching to an enrichment pipeline to allow for incremental modification steps without\n * having to rebuild the index every time.\n */\n cache?: SearchIndexerCache;\n}\n\n/**\n * A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be\n * used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym\n * maps.\n */\nexport interface SearchResourceEncryptionKey {\n /**\n * The name of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyName: string;\n /**\n * The version of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyVersion: string;\n /**\n * The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be\n * used to encrypt your data at rest. An example URI might be\n * https://my-keyvault-name.vault.azure.net.\n */\n vaultUrl: string;\n /**\n * An AAD Application ID that was granted the required access permissions to the Azure Key Vault\n * that is to be used when encrypting your data at rest. 
The Application ID should not be\n * confused with the Object ID for your AAD Application.\n */\n applicationId?: string;\n /**\n * The authentication key of the specified AAD application.\n */\n applicationSecret?: string;\n /**\n * An explicit managed identity to use for this encryption key. If not specified and the access\n * credentials property is null, the system-assigned managed identity is used. On update to the\n * resource, if the explicit identity is unspecified, it remains unchanged. If \"none\" is specified,\n * the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * A list of skills.\n */\nexport interface SearchIndexerSkillset {\n /**\n * The name of the skillset.\n */\n name: string;\n /**\n * The description of the skillset.\n */\n description?: string;\n /**\n * A list of skills in the skillset.\n */\n skills: SearchIndexerSkill[];\n /**\n * Details about cognitive services to be used when running skills.\n */\n cognitiveServicesAccount?: CognitiveServicesAccount;\n /**\n * Definition of additional projections to azure blob, table, or files, of enriched data.\n */\n knowledgeStore?: SearchIndexerKnowledgeStore;\n /**\n * The ETag of the skillset.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your skillset definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure\n * Cognitive Search. Once you have encrypted your skillset definition, it will always remain\n * encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can\n * change this property as needed if you want to rotate your encryption key; Your skillset\n * definition will be unaffected. Encryption with customer-managed keys is not available for free\n * search services, and is only available for paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/**\n * Defines parameters for a search index that influence scoring in search queries.\n */\nexport interface ScoringProfile {\n /**\n * The name of the scoring profile.\n */\n name: string;\n /**\n * Parameters that boost scoring based on text matches in certain index fields.\n */\n textWeights?: TextWeights;\n /**\n * The collection of functions that influence the scoring of documents.\n */\n functions?: ScoringFunction[];\n /**\n * A value indicating how the results of individual scoring functions should be combined.\n * Defaults to \"Sum\". Ignored if there are no scoring functions. Possible values include: 'sum',\n * 'average', 'minimum', 'maximum', 'firstMatching'\n */\n functionAggregation?: ScoringFunctionAggregation;\n}\n\n/**\n * Defines values for TokenizerName.\n * @readonly\n */\nexport enum KnownTokenizerNames {\n /**\n * Grammar-based tokenizer that is suitable for processing most European-language documents. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html\n */\n Classic = \"classic\",\n /**\n * Tokenizes the input from an edge into n-grams of the given size(s). See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html\n */\n EdgeNGram = \"edgeNGram\",\n /**\n * Emits the entire input as a single token. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html\n */\n Keyword = \"keyword_v2\",\n /**\n * Divides text at non-letters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html\n */\n Letter = \"letter\",\n /**\n * Divides text at non-letters and converts them to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html\n */\n Lowercase = \"lowercase\",\n /**\n * Divides text using language-specific rules.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageTokenizer = \"microsoft_language_tokenizer\",\n /**\n * Divides text using language-specific rules and reduces words to their base forms.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageStemmingTokenizer = \"microsoft_language_stemming_tokenizer\",\n /**\n * Tokenizes the input into n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html\n */\n NGram = \"nGram\",\n /**\n * Tokenizer for path-like hierarchies. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html\n */\n PathHierarchy = \"path_hierarchy_v2\",\n /**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html\n */\n Pattern = \"pattern\",\n /**\n * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop\n * filter. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html\n */\n Standard = \"standard_v2\",\n /**\n * Tokenizes urls and emails as one token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html\n */\n UaxUrlEmail = \"uax_url_email\",\n /**\n * Divides text at whitespace. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html\n */\n Whitespace = \"whitespace\"\n}\n\n/**\n * Defines values for TokenFilterName.\n * @readonly\n */\nexport enum KnownTokenFilterNames {\n /**\n * A token filter that applies the Arabic normalizer to normalize the orthography. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html\n */\n ArabicNormalization = \"arabic_normalization\",\n /**\n * Strips all characters after an apostrophe (including the apostrophe itself). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html\n */\n Apostrophe = \"apostrophe\",\n /**\n * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127\n * ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such\n * equivalents exist. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n AsciiFolding = \"asciifolding\",\n /**\n * Forms bigrams of CJK terms that are generated from StandardTokenizer. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html\n */\n CjkBigram = \"cjk_bigram\",\n /**\n * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic\n * Latin, and half-width Katakana variants into the equivalent Kana. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html\n */\n CjkWidth = \"cjk_width\",\n /**\n * Removes English possessives, and dots from acronyms. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html\n */\n Classic = \"classic\",\n /**\n * Construct bigrams for frequently occurring terms while indexing. Single terms are still\n * indexed too, with bigrams overlaid. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html\n */\n CommonGram = \"common_grams\",\n /**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html\n */\n EdgeNGram = \"edgeNGram_v2\",\n /**\n * Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html\n */\n Elision = \"elision\",\n /**\n * Normalizes German characters according to the heuristics of the German2 snowball algorithm.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html\n */\n GermanNormalization = \"german_normalization\",\n /**\n * Normalizes text in Hindi to remove some differences in spelling variations. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html\n */\n HindiNormalization = \"hindi_normalization\",\n /**\n * Normalizes the Unicode representation of text in Indian languages. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html\n */\n IndicNormalization = \"indic_normalization\",\n /**\n * Emits each incoming token twice, once as keyword and once as non-keyword. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html\n */\n KeywordRepeat = \"keyword_repeat\",\n /**\n * A high-performance kstem filter for English. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html\n */\n KStem = \"kstem\",\n /**\n * Removes words that are too long or too short. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html\n */\n Length = \"length\",\n /**\n * Limits the number of tokens while indexing. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html\n */\n Limit = \"limit\",\n /**\n * Normalizes token text to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm\n */\n Lowercase = \"lowercase\",\n /**\n * Generates n-grams of the given size(s). 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html\n */\n NGram = \"nGram_v2\",\n /**\n * Applies normalization for Persian. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html\n */\n PersianNormalization = \"persian_normalization\",\n /**\n * Create tokens for phonetic matches. See\n * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html\n */\n Phonetic = \"phonetic\",\n /**\n * Uses the Porter stemming algorithm to transform the token stream. See\n * http://tartarus.org/~martin/PorterStemmer\n */\n PorterStem = \"porter_stem\",\n /**\n * Reverses the token string. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html\n */\n Reverse = \"reverse\",\n /**\n * Normalizes use of the interchangeable Scandinavian characters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html\n */\n ScandinavianNormalization = \"scandinavian_normalization\",\n /**\n * Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use\n * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html\n */\n ScandinavianFoldingNormalization = \"scandinavian_folding\",\n /**\n * Creates combinations of tokens as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html\n */\n Shingle = \"shingle\",\n /**\n * A filter that stems words using a Snowball-generated stemmer. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html\n */\n Snowball = \"snowball\",\n /**\n * Normalizes the Unicode representation of Sorani text. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html\n */\n SoraniNormalization = \"sorani_normalization\",\n /**\n * Language specific stemming filter. See\n * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters\n */\n Stemmer = \"stemmer\",\n /**\n * Removes stop words from a token stream. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html\n */\n Stopwords = \"stopwords\",\n /**\n * Trims leading and trailing whitespace from tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html\n */\n Trim = \"trim\",\n /**\n * Truncates the terms to a specific length. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html\n */\n Truncate = \"truncate\",\n /**\n * Filters out tokens with same text as the previous token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html\n */\n Unique = \"unique\",\n /**\n * Normalizes token text to upper case. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html\n */\n Uppercase = \"uppercase\",\n /**\n * Splits words into subwords and performs optional transformations on subword groups.\n */\n WordDelimiter = \"word_delimiter\"\n}\n\n/**\n * Defines values for CharFilterName.\n * @readonly\n */\nexport enum KnownCharFilterNames {\n /**\n * A character filter that attempts to strip out HTML constructs. See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html\n */\n HtmlStrip = \"html_strip\"\n}\n\n/**\n * Defines values for AnalyzerName.\n * See https://docs.microsoft.com/rest/api/searchservice/Language-support\n * @readonly\n */\nexport enum KnownAnalyzerNames {\n /**\n * Arabic\n */\n ArMicrosoft = \"ar.microsoft\",\n /**\n * Arabic\n */\n ArLucene = \"ar.lucene\",\n /**\n * Armenian\n */\n HyLucene = \"hy.lucene\",\n /**\n * Bangla\n */\n BnMicrosoft = \"bn.microsoft\",\n /**\n * Basque\n */\n EuLucene = \"eu.lucene\",\n /**\n * Bulgarian\n */\n BgMicrosoft = \"bg.microsoft\",\n /**\n * Bulgarian\n */\n BgLucene = \"bg.lucene\",\n /**\n * Catalan\n */\n CaMicrosoft = \"ca.microsoft\",\n /**\n * Catalan\n */\n CaLucene = \"ca.lucene\",\n /**\n * Chinese Simplified\n */\n ZhHansMicrosoft = \"zh-Hans.microsoft\",\n /**\n * Chinese Simplified\n */\n ZhHansLucene = \"zh-Hans.lucene\",\n /**\n * Chinese Traditional\n */\n ZhHantMicrosoft = \"zh-Hant.microsoft\",\n /**\n * Chinese Traditional\n */\n ZhHantLucene = \"zh-Hant.lucene\",\n /**\n * Croatian\n */\n HrMicrosoft = \"hr.microsoft\",\n /**\n * Czech\n */\n CsMicrosoft = \"cs.microsoft\",\n /**\n * Czech\n */\n CsLucene = \"cs.lucene\",\n /**\n * Danish\n */\n DaMicrosoft = \"da.microsoft\",\n /**\n * Danish\n */\n DaLucene = \"da.lucene\",\n /**\n * Dutch\n */\n NlMicrosoft = \"nl.microsoft\",\n /**\n * Dutch\n */\n NlLucene = \"nl.lucene\",\n /**\n * English\n */\n EnMicrosoft = \"en.microsoft\",\n /**\n * English\n */\n EnLucene = \"en.lucene\",\n /**\n * Estonian\n */\n EtMicrosoft = \"et.microsoft\",\n /**\n * Finnish\n */\n FiMicrosoft = \"fi.microsoft\",\n /**\n * Finnish\n */\n FiLucene = \"fi.lucene\",\n /**\n * French\n */\n FrMicrosoft = \"fr.microsoft\",\n /**\n * French\n */\n FrLucene = \"fr.lucene\",\n /**\n * Galician\n */\n GlLucene = \"gl.lucene\",\n /**\n * German\n */\n DeMicrosoft = \"de.microsoft\",\n /**\n * German\n */\n DeLucene = \"de.lucene\",\n /**\n * Greek\n */\n ElMicrosoft = \"el.microsoft\",\n /**\n * Greek\n */\n ElLucene = \"el.lucene\",\n /**\n * Gujarati\n */\n GuMicrosoft = \"gu.microsoft\",\n /**\n * Hebrew\n */\n HeMicrosoft = \"he.microsoft\",\n /**\n * Hindi\n */\n HiMicrosoft = \"hi.microsoft\",\n /**\n * Hindi\n */\n HiLucene = \"hi.lucene\",\n /**\n * Hungarian\n */\n HuMicrosoft = \"hu.microsoft\",\n /**\n * Hungarian\n */\n HuLucene = \"hu.lucene\",\n /**\n * Icelandic\n */\n IsMicrosoft = \"is.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdMicrosoft = \"id.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdLucene = \"id.lucene\",\n /**\n * Irish\n */\n GaLucene = \"ga.lucene\",\n /**\n * Italian\n */\n ItMicrosoft = \"it.microsoft\",\n /**\n * Italian\n */\n ItLucene = \"it.lucene\",\n /**\n * Japanese\n */\n JaMicrosoft = \"ja.microsoft\",\n /**\n * Japanese\n */\n JaLucene = \"ja.lucene\",\n /**\n * Kannada\n */\n KnMicrosoft = \"kn.microsoft\",\n /**\n * Korean\n */\n KoMicrosoft = \"ko.microsoft\",\n /**\n * Korean\n */\n KoLucene = \"ko.lucene\",\n /**\n * 
Latvian\n */\n LvMicrosoft = \"lv.microsoft\",\n /**\n * Latvian\n */\n LvLucene = \"lv.lucene\",\n /**\n * Lithuanian\n */\n LtMicrosoft = \"lt.microsoft\",\n /**\n * Malayalam\n */\n MlMicrosoft = \"ml.microsoft\",\n /**\n * Malay (Latin)\n */\n MsMicrosoft = \"ms.microsoft\",\n /**\n * Marathi\n */\n MrMicrosoft = \"mr.microsoft\",\n /**\n * Norwegian\n */\n NbMicrosoft = \"nb.microsoft\",\n /**\n * Norwegian\n */\n NoLucene = \"no.lucene\",\n /**\n * Persian\n */\n FaLucene = \"fa.lucene\",\n /**\n * Polish\n */\n PlMicrosoft = \"pl.microsoft\",\n /**\n * Polish\n */\n PlLucene = \"pl.lucene\",\n /**\n * Portuguese (Brazil)\n */\n PtBRMicrosoft = \"pt-BR.microsoft\",\n /**\n * Portuguese (Brazil)\n */\n PtBRLucene = \"pt-BR.lucene\",\n /**\n * Portuguese (Portugal)\n */\n PtPTMicrosoft = \"pt-PT.microsoft\",\n /**\n * Portuguese (Portugal)\n */\n PtPTLucene = \"pt-PT.lucene\",\n /**\n * Punjabi\n */ PaMicrosoft = \"pa.microsoft\",\n /**\n * Romanian\n */\n RoMicrosoft = \"ro.microsoft\",\n /**\n * Romanian\n */\n RoLucene = \"ro.lucene\",\n /**\n * Russian\n */\n RuMicrosoft = \"ru.microsoft\",\n /**\n * Russian\n */\n RuLucene = \"ru.lucene\",\n /**\n * Serbian (Cyrillic)\n */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /**\n * Serbian (Latin)\n */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /**\n * Slovak\n */\n SkMicrosoft = \"sk.microsoft\",\n /**\n * Slovenian\n */\n SlMicrosoft = \"sl.microsoft\",\n /**\n * Spanish\n */\n EsMicrosoft = \"es.microsoft\",\n /**\n * Spanish\n */\n EsLucene = \"es.lucene\",\n /**\n * Swedish\n */\n SvMicrosoft = \"sv.microsoft\",\n /**\n * Swedish\n */\n SvLucene = \"sv.lucene\",\n /**\n * Tamil\n */\n TaMicrosoft = \"ta.microsoft\",\n /**\n * Telugu\n */\n TeMicrosoft = \"te.microsoft\",\n /**\n * Thai\n */\n ThMicrosoft = \"th.microsoft\",\n /**\n * Thai\n */\n ThLucene = \"th.lucene\",\n /**\n * Turkish\n */\n TrMicrosoft = \"tr.microsoft\",\n /**\n * Turkish\n */\n TrLucene = \"tr.lucene\",\n /**\n * Ukrainian\n */\n UkMicrosoft = \"uk.microsoft\",\n /**\n * Urdu\n */\n UrMicrosoft = \"ur.microsoft\",\n /**\n * Vietnamese\n */\n ViMicrosoft = \"vi.microsoft\",\n /**\n * See: https://lucene.apache.org/core/6_6_1/core/org/apache/lucene/analysis/standard/StandardAnalyzer.html\n */\n StandardLucene = \"standard.lucene\",\n /**\n * See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /**\n * Treats the entire content of a field as a single token. 
This is useful for data like zip codes, ids, and some product names.\n */\n Keyword = \"keyword\",\n /**\n * Flexibly separates text into terms via a regular expression pattern.\n */\n Pattern = \"pattern\",\n /**\n * Divides text at non-letters and converts them to lower case.\n */\n Simple = \"simple\",\n /**\n * Divides text at non-letters; Applies the lowercase and stopword token filters.\n */\n Stop = \"stop\",\n /**\n * An analyzer that uses the whitespace tokenizer.\n */\n Whitespace = \"whitespace\"\n}\n\n/**\n * Contains the possible cases for DataChangeDetectionPolicy.\n */\nexport type DataChangeDetectionPolicy =\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\n\n/**\n * Contains the possible cases for SearchIndexerDataIdentity.\n */\nexport type SearchIndexerDataIdentity =\n | SearchIndexerDataNoneIdentity\n | SearchIndexerDataUserAssignedIdentity;\n\n/**\n * Contains the possible cases for DataDeletionDetectionPolicy.\n */\nexport type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;\n\n/**\n * Represents a datasource definition, which can be used to configure an indexer.\n */\nexport interface SearchIndexerDataSourceConnection {\n /**\n * The name of the datasource.\n */\n name: string;\n /**\n * The description of the datasource.\n */\n description?: string;\n /**\n * The type of the datasource. Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob',\n * 'AzureTable', 'MySql', 'AdlsGen2'\n */\n type: SearchIndexerDataSourceType;\n /**\n * The connection string for the datasource.\n */\n connectionString?: string;\n /**\n * The data container for the datasource.\n */\n container: SearchIndexerDataContainer;\n /**\n * An explicit managed identity to use for this datasource. If not specified and the connection\n * string is a managed identity, the system-assigned managed identity is used. If not specified,\n * the value remains unchanged. If \"none\" is specified, the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n /**\n * The data change detection policy for the datasource.\n */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicy;\n /**\n * The data deletion detection policy for the datasource.\n */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy;\n /**\n * The ETag of the DataSource.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your datasource definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your data source definition in\n * Azure Cognitive Search. Once you have encrypted your data source definition, it will always\n * remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null.\n * You can change this property as needed if you want to rotate your encryption key; Your\n * datasource definition will be unaffected. Encryption with customer-managed keys is not\n * available for free search services, and is only available for paid services created on or\n * after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n// END manually modified generated interfaces\n"]}
1
+ {"version":3,"file":"serviceModels.js","sourceRoot":"","sources":["../../src/serviceModels.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAw2BlC,MAAM,UAAU,cAAc,CAAC,KAAkB;IAC/C,OAAO,KAAK,CAAC,IAAI,KAAK,iBAAiB,IAAI,KAAK,CAAC,IAAI,KAAK,6BAA6B,CAAC;AAC1F,CAAC;AA8TD;;;GAGG;AACH,MAAM,CAAN,IAAY,mBAmEX;AAnED,WAAY,mBAAmB;IAC7B;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;;OAGG;IACH,6CAAsB,CAAA;IACtB;;;OAGG;IACH,wCAAiB,CAAA;IACjB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;OAEG;IACH,wDAAwD;IACxD,kFAA2D,CAAA;IAC3D;;OAEG;IACH,wDAAwD;IACxD,mGAA4E,CAAA;IAC5E;;;OAGG;IACH,sCAAe,CAAA;IACf;;;OAGG;IACH,0DAAmC,CAAA;IACnC;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;;OAIG;IACH,+CAAwB,CAAA;IACxB;;;OAGG;IACH,oDAA6B,CAAA;IAC7B;;;OAGG;IACH,gDAAyB,CAAA;AAC3B,CAAC,EAnEW,mBAAmB,KAAnB,mBAAmB,QAmE9B;AAED;;;GAGG;AACH,MAAM,CAAN,IAAY,qBAiLX;AAjLD,WAAY,qBAAqB;IAC/B;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,kDAAyB,CAAA;IACzB;;;;;OAKG;IACH,sDAA6B,CAAA;IAC7B;;;OAGG;IACH,iDAAwB,CAAA;IACxB;;;;OAIG;IACH,+CAAsB,CAAA;IACtB;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,oDAA2B,CAAA;IAC3B;;;;OAIG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,yDAAgC,CAAA;IAChC;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,2CAAkB,CAAA;IAClB;;;OAGG;IACH,uEAA8C,CAAA;IAC9C;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,iFAAwD,CAAA;IACxD;;;;OAIG;IACH,kFAAyD,CAAA;IACzD;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,sCAAa,CAAA;IACb;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;OAEG;IACH,yDAAgC,CAAA;AAClC,CAAC,EAjLW,qBAAqB,KAArB,qBAAqB,QAiLhC;AAED;;;GAGG;AACH,MAAM,CAAN,IAAY,oBAMX;AAND,WAAY,oBAAoB;IAC9B;;;OAGG;IACH,gDAAwB,CAAA;AAC1B,CAAC,EANW,oBAAoB,KAApB,oBAAoB,QAM/B;AAED;;;;GAIG;AACH,MAAM,CAAN,IAAY,kBAoXX;AApXD,WAAY,kBAAkB;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4
CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG,CAAC,kDAA4B,CAAA;IAChC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,mEAA6C,CAAA;IAC7C;;OAEG;IACH,6DAAuC,CAAA;IACvC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,wDAAkC,CAAA;IAClC;;OAEG;IACH,gFAA0D,CAAA;IAC1D;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,uCAAiB,CAAA;IACjB;;OAEG;IACH,mCAAa,CAAA;IACb;;OAEG;IACH,+CAAyB,CAAA;AAC3B,CAAC,EApXW,kBAAkB,KAAlB,kBAAkB,QAoX7B;AA6ED,6CAA6C","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport { OperationOptions } from \"@azure/core-client\";\nimport {\n LuceneStandardAnalyzer,\n StopAnalyzer,\n CorsOptions,\n Suggester as SearchSuggester,\n ClassicTokenizer,\n EdgeNGramTokenizer,\n MicrosoftLanguageTokenizer,\n MicrosoftLanguageStemmingTokenizer,\n NGramTokenizer,\n PathHierarchyTokenizerV2 as PathHierarchyTokenizer,\n UaxUrlEmailTokenizer,\n AsciiFoldingTokenFilter,\n CjkBigramTokenFilter,\n CommonGramTokenFilter,\n DictionaryDecompounderTokenFilter,\n LengthTokenFilter,\n ElisionTokenFilter,\n KeepTokenFilter,\n KeywordMarkerTokenFilter,\n LimitTokenFilter,\n PatternCaptureTokenFilter,\n PatternReplaceTokenFilter,\n PhoneticTokenFilter,\n ShingleTokenFilter,\n SnowballTokenFilter,\n StemmerTokenFilter,\n StemmerOverrideTokenFilter,\n StopwordsTokenFilter,\n SynonymTokenFilter,\n TruncateTokenFilter,\n UniqueTokenFilter,\n WordDelimiterTokenFilter,\n MappingCharFilter,\n PatternReplaceCharFilter,\n DistanceScoringFunction,\n FreshnessScoringFunction,\n MagnitudeScoringFunction,\n TagScoringFunction,\n TextWeights,\n ScoringFunctionAggregation,\n RegexFlags,\n ConditionalSkill,\n KeyPhraseExtractionSkill,\n OcrSkill,\n ImageAnalysisSkill,\n LanguageDetectionSkill,\n ShaperSkill,\n MergeSkill,\n EntityRecognitionSkill,\n SentimentSkill,\n SplitSkill,\n PIIDetectionSkill,\n EntityRecognitionSkillV3,\n EntityLinkingSkill,\n SentimentSkillV3,\n CustomEntityLookupSkill,\n DocumentExtractionSkill,\n TextTranslationSkill,\n WebApiSkill,\n DefaultCognitiveServicesAccount,\n CognitiveServicesAccountKey,\n HighWaterMarkChangeDetectionPolicy,\n SqlIntegratedChangeTrackingPolicy,\n SearchIndexerDataUserAssignedIdentity,\n SearchIndexerDataNoneIdentity,\n SoftDeleteColumnDeletionDetectionPolicy,\n SearchIndexerDataSourceType,\n SearchIndexerDataContainer,\n LexicalAnalyzerName,\n ClassicSimilarity,\n BM25Similarity,\n EdgeNGramTokenFilterSide,\n ServiceCounters,\n ServiceLimits,\n FieldMapping,\n IndexingParameters,\n IndexingSchedule,\n LexicalNormalizerName,\n CustomNormalizer,\n SearchIndexerKnowledgeStore,\n SearchIndexerCache,\n SemanticSettings\n} from \"./generated/service/models\";\n\nimport { PagedAsyncIterableIterator 
} from \"@azure/core-paging\";\n\n/**\n * Options for a list skillsets operation.\n */\nexport type ListSkillsetsOptions = OperationOptions;\n\n/**\n * Options for a list synonymMaps operation.\n */\nexport type ListSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for a list indexes operation.\n */\nexport type ListIndexesOptions = OperationOptions;\n\n/**\n * Options for a list indexers operation.\n */\nexport type ListIndexersOptions = OperationOptions;\n\n/**\n * Options for a list data sources operation.\n */\nexport type ListDataSourceConnectionsOptions = OperationOptions;\n\n/**\n * Options for get index operation.\n */\nexport type GetIndexOptions = OperationOptions;\n\n/**\n * Options for get skillset operation.\n */\nexport type GetSkillSetOptions = OperationOptions;\n\n/**\n * Options for get synonymmaps operation.\n */\nexport type GetSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for get indexer operation.\n */\nexport type GetIndexerOptions = OperationOptions;\n\n/**\n * Options for get datasource operation.\n */\nexport type GetDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for get index statistics operation.\n */\nexport type GetIndexStatisticsOptions = OperationOptions;\n\n/**\n * Statistics for a given index. Statistics are collected periodically and are not guaranteed to\n * always be up-to-date.\n */\nexport interface SearchIndexStatistics {\n /**\n * The number of documents in the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly storageSize: number;\n}\n\n/**\n * Response from a get service statistics request. If successful, it includes service level\n * counters and limits.\n */\nexport interface SearchServiceStatistics {\n /**\n * Service level resource counters.\n */\n counters: ServiceCounters;\n /**\n * Service level general limits.\n */\n limits: ServiceLimits;\n}\n\n/**\n * Options for get service statistics operation.\n */\nexport type GetServiceStatisticsOptions = OperationOptions;\n\n/**\n * Options for get indexer status operation.\n */\nexport type GetIndexerStatusOptions = OperationOptions;\n\n/**\n * Options for reset indexer operation.\n */\nexport type ResetIndexerOptions = OperationOptions;\n\n/**\n * Options for run indexer operation.\n */\nexport type RunIndexerOptions = OperationOptions;\n\n/**\n * Options for create index operation.\n */\nexport type CreateIndexOptions = OperationOptions;\n\n/**\n * Options for create skillset operation.\n */\nexport type CreateSkillsetOptions = OperationOptions;\n\n/**\n * Options for create synonymmap operation.\n */\nexport type CreateSynonymMapOptions = OperationOptions;\n\n/**\n * Options for create indexer operation.\n */\nexport type CreateIndexerOptions = OperationOptions;\n\n/**\n * Options for create datasource operation.\n */\nexport type CreateDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for create/update index operation.\n */\nexport interface CreateOrUpdateIndexOptions extends OperationOptions {\n /**\n * Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by\n * taking the index offline for at least a few seconds. This temporarily causes indexing and\n * query requests to fail. 
Performance and write availability of the index can be impaired for\n * several minutes after the index is updated, or longer for very large indexes.\n */\n allowIndexDowntime?: boolean;\n /**\n * If set to true, the resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for reset docs operation.\n */\nexport interface ResetDocumentsOptions extends OperationOptions {\n /** document keys to be reset */\n documentKeys?: string[];\n /** datasource document identifiers to be reset */\n datasourceDocumentIds?: string[];\n /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */\n overwrite?: boolean;\n}\n\n/**\n * Options for reset skills operation.\n */\nexport interface ResetSkillsOptions extends OperationOptions {\n /** the names of skills to be reset. */\n skillNames?: string[];\n}\n\n/**\n * Options for create/update skillset operation.\n */\nexport interface CreateOrUpdateSkillsetOptions extends OperationOptions {\n /**\n * If set to true, the resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /**\n * Ignores cache reset requirements.\n */\n skipIndexerResetRequirementForCache?: boolean;\n /**\n * Disables cache reprocessing change detection.\n */\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/**\n * Options for create/update synonymmap operation.\n */\nexport interface CreateOrUpdateSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, the resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update indexer operation.\n */\nexport interface CreateorUpdateIndexerOptions extends OperationOptions {\n /**\n * If set to true, the resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /** Ignores cache reset requirements. */\n skipIndexerResetRequirementForCache?: boolean;\n /** Disables cache reprocessing change detection. 
*/\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/**\n * Options for create/update datasource operation.\n */\nexport interface CreateorUpdateDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, the resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /**\n * Ignores cache reset requirements.\n */\n skipIndexerResetRequirementForCache?: boolean;\n}\n\n/**\n * Options for delete index operation.\n */\nexport interface DeleteIndexOptions extends OperationOptions {\n /**\n * If set to true, the resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete skillset operation.\n */\nexport interface DeleteSkillsetOptions extends OperationOptions {\n /**\n * If set to true, the resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete synonymmap operation.\n */\nexport interface DeleteSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, the resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete indexer operation.\n */\nexport interface DeleteIndexerOptions extends OperationOptions {\n /**\n * If set to true, the resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete datasource operation.\n */\nexport interface DeleteDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, the resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Specifies some text and analysis components used to break that text into tokens.\n */\nexport interface AnalyzeRequest {\n /**\n * The text to break into tokens.\n */\n text: string;\n /**\n * The name of the analyzer to use to break the given text. If this parameter is not specified,\n * you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. KnownAnalyzerNames is an enum containing known values.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n analyzerName?: string;\n /**\n * The name of the tokenizer to use to break the given text. If this parameter is not specified,\n * you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. KnownTokenizerNames is an enum containing known values.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n tokenizerName?: string;\n /**\n * The name of the normalizer to use to normalize the given text.\n */\n normalizerName?: LexicalNormalizerName;\n /**\n * An optional list of token filters to use when breaking the given text. This parameter can only\n * be set when using the tokenizer parameter.\n */\n tokenFilters?: string[];\n /**\n * An optional list of character filters to use when breaking the given text. This parameter can\n * only be set when using the tokenizer parameter.\n */\n charFilters?: string[];\n}\n\n/**\n * Options for analyze text operation.\n */\nexport type AnalyzeTextOptions = OperationOptions & AnalyzeRequest;\n\n// BEGIN manually modified generated interfaces\n//\n// This section is for places where we have to manually fix issues\n// with interfaces from the generated code.\n// One issue is that unions of discriminated types are generated with\n// their abstract base class as a member.\n\n/**\n * Flexibly separates text into terms via a regular expression pattern. 
This analyzer is\n * implemented using Apache Lucene.\n */\nexport interface PatternAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * A value indicating whether terms should be lower-cased. Default is true. Default value: true.\n */\n lowerCaseTerms?: boolean;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * A list of stopwords.\n */\n stopwords?: string[];\n}\n\n/**\n * Allows you to take control over the process of converting text into indexable/searchable tokens.\n * It's a user-defined configuration consisting of a single predefined tokenizer and one or more\n * filters. The tokenizer is responsible for breaking text into tokens, and the filters for\n * modifying tokens emitted by the tokenizer.\n */\nexport interface CustomAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.CustomAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as\n * breaking a sentence into words. KnownTokenizerNames is an enum containing known values.\n */\n tokenizerName: string;\n /**\n * A list of token filters used to filter out or modify the tokens generated by a tokenizer. For\n * example, you can specify a lowercase filter that converts all characters to lowercase. The\n * filters are run in the order in which they are listed.\n */\n tokenFilters?: string[];\n /**\n * A list of character filters used to prepare input text before it is processed by the\n * tokenizer. For instance, they can replace certain characters or symbols. The filters are run\n * in the order in which they are listed.\n */\n charFilters?: string[];\n}\n\n/**\n * Contains the possible cases for Analyzer.\n */\nexport type LexicalAnalyzer =\n | CustomAnalyzer\n | PatternAnalyzer\n | LuceneStandardAnalyzer\n | StopAnalyzer;\n\n/**\n * Contains the possible cases for Skill.\n */\nexport type SearchIndexerSkill =\n | ConditionalSkill\n | KeyPhraseExtractionSkill\n | OcrSkill\n | ImageAnalysisSkill\n | LanguageDetectionSkill\n | ShaperSkill\n | MergeSkill\n | EntityRecognitionSkill\n | SentimentSkill\n | SplitSkill\n | PIIDetectionSkill\n | EntityRecognitionSkillV3\n | EntityLinkingSkill\n | SentimentSkillV3\n | CustomEntityLookupSkill\n | TextTranslationSkill\n | DocumentExtractionSkill\n | WebApiSkill;\n\n/**\n * Contains the possible cases for CognitiveServicesAccount.\n */\nexport type CognitiveServicesAccount =\n | DefaultCognitiveServicesAccount\n | CognitiveServicesAccountKey;\n/**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. 
This tokenizer is\n * implemented using Apache Lucene.\n */\nexport interface PatternTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * The zero-based ordinal of the matching group in the regular expression pattern to extract into\n * tokens. Use -1 if you want to use the entire pattern to split the input into tokens,\n * irrespective of matching groups. Default is -1. Default value: -1.\n */\n group?: number;\n}\n/**\n * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using\n * Apache Lucene.\n */\nexport interface LuceneStandardTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.StandardTokenizerV2\"\n | \"#Microsoft.Azure.Search.StandardTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. Default value: 255.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * This token filter is implemented using Apache Lucene.\n */\nexport interface EdgeNGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n /**\n * Specifies which side of the input the n-gram should be generated from. Default is \"front\".\n * Possible values include: 'Front', 'Back'\n */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/**\n * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.\n */\nexport interface KeywordTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.KeywordTokenizerV2\"\n | \"#Microsoft.Azure.Search.KeywordTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 256. Tokens longer than the maximum length are split. 
The\n * maximum token length that can be used is 300 characters. Default value: 256.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Contains the possible cases for Tokenizer.\n */\nexport type LexicalTokenizer =\n | ClassicTokenizer\n | EdgeNGramTokenizer\n | KeywordTokenizer\n | MicrosoftLanguageTokenizer\n | MicrosoftLanguageStemmingTokenizer\n | NGramTokenizer\n | PathHierarchyTokenizer\n | PatternTokenizer\n | LuceneStandardTokenizer\n | UaxUrlEmailTokenizer;\n\n/**\n * Contains the possible cases for Similarity.\n */\nexport type SimilarityAlgorithm = ClassicSimilarity | BM25Similarity;\n\n/**\n * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.\n */\nexport interface NGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.NGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.NGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n}\n\n/**\n * Contains the possible cases for TokenFilter.\n */\nexport type TokenFilter =\n | AsciiFoldingTokenFilter\n | CjkBigramTokenFilter\n | CommonGramTokenFilter\n | DictionaryDecompounderTokenFilter\n | EdgeNGramTokenFilter\n | ElisionTokenFilter\n | KeepTokenFilter\n | KeywordMarkerTokenFilter\n | LengthTokenFilter\n | LimitTokenFilter\n | NGramTokenFilter\n | PatternCaptureTokenFilter\n | PatternReplaceTokenFilter\n | PhoneticTokenFilter\n | ShingleTokenFilter\n | SnowballTokenFilter\n | StemmerTokenFilter\n | StemmerOverrideTokenFilter\n | StopwordsTokenFilter\n | SynonymTokenFilter\n | TruncateTokenFilter\n | UniqueTokenFilter\n | WordDelimiterTokenFilter;\n\n/**\n * Contains the possible cases for CharFilter.\n */\nexport type CharFilter = MappingCharFilter | PatternReplaceCharFilter;\n\n/**\n * Contains the possible cases for LexicalNormalizer.\n */\nexport type LexicalNormalizer = CustomNormalizer;\n\n/**\n * Contains the possible cases for ScoringFunction.\n */\nexport type ScoringFunction =\n | DistanceScoringFunction\n | FreshnessScoringFunction\n | MagnitudeScoringFunction\n | TagScoringFunction;\n\n/**\n * Defines values for SearchFieldDataType.\n * Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean',\n * 'Edm.DateTimeOffset', 'Edm.GeographyPoint', 'Collection(Edm.String)',\n * 'Collection(Edm.Int32)', 'Collection(Edm.Int64)', 'Collection(Edm.Double)',\n * 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)'\n * @readonly\n */\nexport type SearchFieldDataType =\n | \"Edm.String\"\n | \"Edm.Int32\"\n | \"Edm.Int64\"\n | \"Edm.Double\"\n | \"Edm.Boolean\"\n | \"Edm.DateTimeOffset\"\n | \"Edm.GeographyPoint\"\n | \"Collection(Edm.String)\"\n | \"Collection(Edm.Int32)\"\n | \"Collection(Edm.Int64)\"\n | \"Collection(Edm.Double)\"\n | \"Collection(Edm.Boolean)\"\n | \"Collection(Edm.DateTimeOffset)\"\n | \"Collection(Edm.GeographyPoint)\";\n\n/**\n * Defines values for ComplexDataType.\n * Possible values include: 'Edm.ComplexType', 'Collection(Edm.ComplexType)'\n * @readonly\n */\nexport type ComplexDataType = \"Edm.ComplexType\" 
| \"Collection(Edm.ComplexType)\";\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport type SearchField = SimpleField | ComplexField;\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface SimpleField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field. Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64',\n * 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', 'Edm.GeographyPoint'\n * 'Collection(Edm.String)', 'Collection(Edm.Int32)', 'Collection(Edm.Int64)',\n * 'Collection(Edm.Double)', 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)',\n * 'Collection(Edm.GeographyPoint)'\n */\n type: SearchFieldDataType;\n /**\n * A value indicating whether the field uniquely identifies documents in the index. Exactly one\n * top-level field in each index must be chosen as the key field and it must be of type\n * Edm.String. Key fields can be used to look up documents directly and update or delete specific\n * documents. Default is false.\n */\n key?: boolean;\n /**\n * A value indicating whether the field can be returned in a search result. You can enable this\n * option if you want to use a field (for example, margin) as a filter, sorting, or scoring\n * mechanism but do not want the field to be visible to the end user. This property must be false\n * for key fields. This property can be changed on existing fields.\n * Disabling this property does not cause any increase in index storage requirements.\n * Default is false.\n */\n hidden?: boolean;\n /**\n * A value indicating whether the field is full-text searchable. This means it will undergo\n * analysis such as word-breaking during indexing. If you set a searchable field to a value like\n * \"sunny day\", internally it will be split into the individual tokens \"sunny\" and \"day\". This\n * enables full-text searches for these terms. This property must be false for simple\n * fields of other non-string data types.\n * Note: searchable fields consume extra space in your index since Azure Cognitive Search will store an\n * additional tokenized version of the field value for full-text searches.\n * Defaults to false for simple fields.\n */\n searchable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $filter queries. Filterable\n * differs from searchable in how strings are handled. Fields of type Edm.String or\n * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are\n * for exact matches only. For example, if you set such a field f to \"sunny day\", $filter=f eq\n * 'sunny' will find no matches, but $filter=f eq 'sunny day' will.\n * Default is false.\n */\n filterable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $orderby expressions. By\n * default Azure Cognitive Search sorts results by score, but in many experiences users will want\n * to sort by fields in the documents. A simple field can be sortable only if it is single-valued\n * (it has a single value in the scope of the parent document). Simple collection fields cannot\n * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also\n * multi-valued, and therefore cannot be sortable. 
This is true whether it's an immediate parent\n * field, or an ancestor field, that's the complex collection. The default for sortable is false.\n */\n sortable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in facet queries. Typically\n * used in a presentation of search results that includes hit count by category (for example,\n * search for digital cameras and see hits by brand, by megapixels, by price, and so on).\n * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable.\n * Default is false for all other simple fields.\n */\n facetable?: boolean;\n /**\n * The name of the language analyzer to use for the field. This option can be used only with\n * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.\n * Once the analyzer is chosen, it cannot be changed for the field.\n * KnownAnalyzerNames is an enum containing known values.\n */\n analyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at search time for the field. This option can be used only with\n * searchable fields. It must be set together with indexAnalyzer and it cannot be set together\n * with the analyzer option. This analyzer can be updated on an existing field.\n * KnownAnalyzerNames is an enum containing known values.\n */\n searchAnalyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at indexing time for the field. This option can be used only\n * with searchable fields. It must be set together with searchAnalyzer and it cannot be set\n * together with the analyzer option. Once the analyzer is chosen, it cannot be changed for the\n * field. KnownAnalyzerNames is an enum containing known values.\n */\n indexAnalyzerName?: LexicalAnalyzerName;\n /**\n * A list of the names of synonym maps to associate with this field. This option can be used only\n * with searchable fields. Currently only one synonym map per field is supported. Assigning a\n * synonym map to a field ensures that query terms targeting that field are expanded at\n * query-time using the rules in the synonym map. This attribute can be changed on existing\n * fields.\n */\n synonymMapNames?: string[];\n /**\n * The name of the normalizer used at indexing time for the field.\n */\n normalizerName?: LexicalNormalizerName;\n}\n\nexport function isComplexField(field: SearchField): field is ComplexField {\n return field.type === \"Edm.ComplexType\" || field.type === \"Collection(Edm.ComplexType)\";\n}\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface ComplexField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field.\n * Possible values include: 'Edm.ComplexType','Collection(Edm.ComplexType)'\n */\n type: ComplexDataType;\n /**\n * A list of sub-fields.\n */\n fields: SearchField[];\n}\n\n/**\n * Represents a synonym map definition.\n */\nexport interface SynonymMap {\n /**\n * The name of the synonym map.\n */\n name: string;\n /**\n * An array of synonym rules in the specified synonym map format.\n */\n synonyms: string[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. 
This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The ETag of the synonym map.\n */\n etag?: string;\n}\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\n// eslint-disable-next-line @typescript-eslint/ban-types\nexport type IndexIterator = PagedAsyncIterableIterator<SearchIndex, SearchIndex[], {}>;\n\n/**\n * An iterator for listing the names of the indexes that exist in the Search service. Will make\n * requests as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\n// eslint-disable-next-line @typescript-eslint/ban-types\nexport type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;\n\n/**\n * Represents a search index definition, which describes the fields and search behavior of an\n * index.\n */\nexport interface SearchIndex {\n /**\n * The name of the index.\n */\n name: string;\n /**\n * The fields of the index.\n */\n fields: SearchField[];\n /**\n * The scoring profiles for the index.\n */\n scoringProfiles?: ScoringProfile[];\n /**\n * The name of the scoring profile to use if none is specified in the query. If this property is\n * not set and no scoring profile is specified in the query, then default scoring (tf-idf) will\n * be used.\n */\n defaultScoringProfile?: string;\n /**\n * Options to control Cross-Origin Resource Sharing (CORS) for the index.\n */\n corsOptions?: CorsOptions;\n /**\n * The suggesters for the index.\n */\n suggesters?: SearchSuggester[];\n /**\n * The analyzers for the index.\n */\n analyzers?: LexicalAnalyzer[];\n /**\n * The tokenizers for the index.\n */\n tokenizers?: LexicalTokenizer[];\n /**\n * The token filters for the index.\n */\n tokenFilters?: TokenFilter[];\n /**\n * The character filters for the index.\n */\n charFilters?: CharFilter[];\n /**\n * The normalizers for the index.\n */\n normalizers?: LexicalNormalizer[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The type of similarity algorithm to be used when scoring and ranking the documents matching a\n * search query. 
The similarity algorithm can only be defined at index creation time and cannot\n * be modified on existing indexes. If null, the ClassicSimilarity algorithm is used.\n */\n similarity?: SimilarityAlgorithm;\n /**\n * Defines parameters for a search index that influence semantic capabilities.\n */\n semanticSettings?: SemanticSettings;\n /**\n * The ETag of the index.\n */\n etag?: string;\n}\n\n/**\n * Represents an indexer.\n */\nexport interface SearchIndexer {\n /**\n * The name of the indexer.\n */\n name: string;\n /**\n * The description of the indexer.\n */\n description?: string;\n /**\n * The name of the datasource from which this indexer reads data.\n */\n dataSourceName: string;\n /**\n * The name of the skillset executing with this indexer.\n */\n skillsetName?: string;\n /**\n * The name of the index to which this indexer writes data.\n */\n targetIndexName: string;\n /**\n * The schedule for this indexer.\n */\n schedule?: IndexingSchedule;\n /**\n * Parameters for indexer execution.\n */\n parameters?: IndexingParameters;\n /**\n * Defines mappings between fields in the data source and corresponding target fields in the\n * index.\n */\n fieldMappings?: FieldMapping[];\n /**\n * Output field mappings are applied after enrichment and immediately before indexing.\n */\n outputFieldMappings?: FieldMapping[];\n /**\n * A value indicating whether the indexer is disabled. Default is false. Default value: false.\n */\n isDisabled?: boolean;\n /**\n * The ETag of the indexer.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your indexer definition (as well as\n * indexer execution status) when you want full assurance that no one, not even Microsoft, can\n * decrypt them in Azure Cognitive Search. Once you have encrypted your indexer definition, it\n * will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property\n * to null. You can change this property as needed if you want to rotate your encryption key;\n * Your indexer definition (and indexer execution status) will be unaffected. Encryption with\n * customer-managed keys is not available for free search services, and is only available for\n * paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * Adds caching to an enrichment pipeline to allow for incremental modification steps without\n * having to rebuild the index every time.\n */\n cache?: SearchIndexerCache;\n}\n\n/**\n * A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be\n * used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym\n * maps.\n */\nexport interface SearchResourceEncryptionKey {\n /**\n * The name of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyName: string;\n /**\n * The version of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyVersion: string;\n /**\n * The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be\n * used to encrypt your data at rest. An example URI might be\n * https://my-keyvault-name.vault.azure.net.\n */\n vaultUrl: string;\n /**\n * An AAD Application ID that was granted the required access permissions to the Azure Key Vault\n * that is to be used when encrypting your data at rest. 
The Application ID should not be\n * confused with the Object ID for your AAD Application.\n */\n applicationId?: string;\n /**\n * The authentication key of the specified AAD application.\n */\n applicationSecret?: string;\n /**\n * An explicit managed identity to use for this encryption key. If not specified and the access\n * credentials property is null, the system-assigned managed identity is used. On update to the\n * resource, if the explicit identity is unspecified, it remains unchanged. If \"none\" is specified,\n * the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * A list of skills.\n */\nexport interface SearchIndexerSkillset {\n /**\n * The name of the skillset.\n */\n name: string;\n /**\n * The description of the skillset.\n */\n description?: string;\n /**\n * A list of skills in the skillset.\n */\n skills: SearchIndexerSkill[];\n /**\n * Details about cognitive services to be used when running skills.\n */\n cognitiveServicesAccount?: CognitiveServicesAccount;\n /**\n * Definition of additional projections to azure blob, table, or files, of enriched data.\n */\n knowledgeStore?: SearchIndexerKnowledgeStore;\n /**\n * The ETag of the skillset.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your skillset definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure\n * Cognitive Search. Once you have encrypted your skillset definition, it will always remain\n * encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can\n * change this property as needed if you want to rotate your encryption key; Your skillset\n * definition will be unaffected. Encryption with customer-managed keys is not available for free\n * search services, and is only available for paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/**\n * Defines parameters for a search index that influence scoring in search queries.\n */\nexport interface ScoringProfile {\n /**\n * The name of the scoring profile.\n */\n name: string;\n /**\n * Parameters that boost scoring based on text matches in certain index fields.\n */\n textWeights?: TextWeights;\n /**\n * The collection of functions that influence the scoring of documents.\n */\n functions?: ScoringFunction[];\n /**\n * A value indicating how the results of individual scoring functions should be combined.\n * Defaults to \"Sum\". Ignored if there are no scoring functions. Possible values include: 'sum',\n * 'average', 'minimum', 'maximum', 'firstMatching'\n */\n functionAggregation?: ScoringFunctionAggregation;\n}\n\n/**\n * Defines values for TokenizerName.\n * @readonly\n */\nexport enum KnownTokenizerNames {\n /**\n * Grammar-based tokenizer that is suitable for processing most European-language documents. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html\n */\n Classic = \"classic\",\n /**\n * Tokenizes the input from an edge into n-grams of the given size(s). See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html\n */\n EdgeNGram = \"edgeNGram\",\n /**\n * Emits the entire input as a single token. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html\n */\n Keyword = \"keyword_v2\",\n /**\n * Divides text at non-letters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html\n */\n Letter = \"letter\",\n /**\n * Divides text at non-letters and converts them to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html\n */\n Lowercase = \"lowercase\",\n /**\n * Divides text using language-specific rules.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageTokenizer = \"microsoft_language_tokenizer\",\n /**\n * Divides text using language-specific rules and reduces words to their base forms.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageStemmingTokenizer = \"microsoft_language_stemming_tokenizer\",\n /**\n * Tokenizes the input into n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html\n */\n NGram = \"nGram\",\n /**\n * Tokenizer for path-like hierarchies. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html\n */\n PathHierarchy = \"path_hierarchy_v2\",\n /**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html\n */\n Pattern = \"pattern\",\n /**\n * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop\n * filter. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html\n */\n Standard = \"standard_v2\",\n /**\n * Tokenizes urls and emails as one token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html\n */\n UaxUrlEmail = \"uax_url_email\",\n /**\n * Divides text at whitespace. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html\n */\n Whitespace = \"whitespace\"\n}\n\n/**\n * Defines values for TokenFilterName.\n * @readonly\n */\nexport enum KnownTokenFilterNames {\n /**\n * A token filter that applies the Arabic normalizer to normalize the orthography. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html\n */\n ArabicNormalization = \"arabic_normalization\",\n /**\n * Strips all characters after an apostrophe (including the apostrophe itself). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html\n */\n Apostrophe = \"apostrophe\",\n /**\n * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127\n * ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such\n * equivalents exist. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n AsciiFolding = \"asciifolding\",\n /**\n * Forms bigrams of CJK terms that are generated from StandardTokenizer. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html\n */\n CjkBigram = \"cjk_bigram\",\n /**\n * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic\n * Latin, and half-width Katakana variants into the equivalent Kana. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html\n */\n CjkWidth = \"cjk_width\",\n /**\n * Removes English possessives, and dots from acronyms. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html\n */\n Classic = \"classic\",\n /**\n * Construct bigrams for frequently occurring terms while indexing. Single terms are still\n * indexed too, with bigrams overlaid. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html\n */\n CommonGram = \"common_grams\",\n /**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html\n */\n EdgeNGram = \"edgeNGram_v2\",\n /**\n * Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html\n */\n Elision = \"elision\",\n /**\n * Normalizes German characters according to the heuristics of the German2 snowball algorithm.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html\n */\n GermanNormalization = \"german_normalization\",\n /**\n * Normalizes text in Hindi to remove some differences in spelling variations. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html\n */\n HindiNormalization = \"hindi_normalization\",\n /**\n * Normalizes the Unicode representation of text in Indian languages. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html\n */\n IndicNormalization = \"indic_normalization\",\n /**\n * Emits each incoming token twice, once as keyword and once as non-keyword. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html\n */\n KeywordRepeat = \"keyword_repeat\",\n /**\n * A high-performance kstem filter for English. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html\n */\n KStem = \"kstem\",\n /**\n * Removes words that are too long or too short. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html\n */\n Length = \"length\",\n /**\n * Limits the number of tokens while indexing. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html\n */\n Limit = \"limit\",\n /**\n * Normalizes token text to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm\n */\n Lowercase = \"lowercase\",\n /**\n * Generates n-grams of the given size(s). 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html\n */\n NGram = \"nGram_v2\",\n /**\n * Applies normalization for Persian. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html\n */\n PersianNormalization = \"persian_normalization\",\n /**\n * Create tokens for phonetic matches. See\n * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html\n */\n Phonetic = \"phonetic\",\n /**\n * Uses the Porter stemming algorithm to transform the token stream. See\n * http://tartarus.org/~martin/PorterStemmer\n */\n PorterStem = \"porter_stem\",\n /**\n * Reverses the token string. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html\n */\n Reverse = \"reverse\",\n /**\n * Normalizes use of the interchangeable Scandinavian characters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html\n */\n ScandinavianNormalization = \"scandinavian_normalization\",\n /**\n * Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use\n * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html\n */\n ScandinavianFoldingNormalization = \"scandinavian_folding\",\n /**\n * Creates combinations of tokens as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html\n */\n Shingle = \"shingle\",\n /**\n * A filter that stems words using a Snowball-generated stemmer. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html\n */\n Snowball = \"snowball\",\n /**\n * Normalizes the Unicode representation of Sorani text. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html\n */\n SoraniNormalization = \"sorani_normalization\",\n /**\n * Language specific stemming filter. See\n * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters\n */\n Stemmer = \"stemmer\",\n /**\n * Removes stop words from a token stream. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html\n */\n Stopwords = \"stopwords\",\n /**\n * Trims leading and trailing whitespace from tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html\n */\n Trim = \"trim\",\n /**\n * Truncates the terms to a specific length. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html\n */\n Truncate = \"truncate\",\n /**\n * Filters out tokens with same text as the previous token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html\n */\n Unique = \"unique\",\n /**\n * Normalizes token text to upper case. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html\n */\n Uppercase = \"uppercase\",\n /**\n * Splits words into subwords and performs optional transformations on subword groups.\n */\n WordDelimiter = \"word_delimiter\"\n}\n\n/**\n * Defines values for CharFilterName.\n * @readonly\n */\nexport enum KnownCharFilterNames {\n /**\n * A character filter that attempts to strip out HTML constructs. See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html\n */\n HtmlStrip = \"html_strip\"\n}\n\n/**\n * Defines values for AnalyzerName.\n * See https://docs.microsoft.com/rest/api/searchservice/Language-support\n * @readonly\n */\nexport enum KnownAnalyzerNames {\n /**\n * Arabic\n */\n ArMicrosoft = \"ar.microsoft\",\n /**\n * Arabic\n */\n ArLucene = \"ar.lucene\",\n /**\n * Armenian\n */\n HyLucene = \"hy.lucene\",\n /**\n * Bangla\n */\n BnMicrosoft = \"bn.microsoft\",\n /**\n * Basque\n */\n EuLucene = \"eu.lucene\",\n /**\n * Bulgarian\n */\n BgMicrosoft = \"bg.microsoft\",\n /**\n * Bulgarian\n */\n BgLucene = \"bg.lucene\",\n /**\n * Catalan\n */\n CaMicrosoft = \"ca.microsoft\",\n /**\n * Catalan\n */\n CaLucene = \"ca.lucene\",\n /**\n * Chinese Simplified\n */\n ZhHansMicrosoft = \"zh-Hans.microsoft\",\n /**\n * Chinese Simplified\n */\n ZhHansLucene = \"zh-Hans.lucene\",\n /**\n * Chinese Traditional\n */\n ZhHantMicrosoft = \"zh-Hant.microsoft\",\n /**\n * Chinese Traditional\n */\n ZhHantLucene = \"zh-Hant.lucene\",\n /**\n * Croatian\n */\n HrMicrosoft = \"hr.microsoft\",\n /**\n * Czech\n */\n CsMicrosoft = \"cs.microsoft\",\n /**\n * Czech\n */\n CsLucene = \"cs.lucene\",\n /**\n * Danish\n */\n DaMicrosoft = \"da.microsoft\",\n /**\n * Danish\n */\n DaLucene = \"da.lucene\",\n /**\n * Dutch\n */\n NlMicrosoft = \"nl.microsoft\",\n /**\n * Dutch\n */\n NlLucene = \"nl.lucene\",\n /**\n * English\n */\n EnMicrosoft = \"en.microsoft\",\n /**\n * English\n */\n EnLucene = \"en.lucene\",\n /**\n * Estonian\n */\n EtMicrosoft = \"et.microsoft\",\n /**\n * Finnish\n */\n FiMicrosoft = \"fi.microsoft\",\n /**\n * Finnish\n */\n FiLucene = \"fi.lucene\",\n /**\n * French\n */\n FrMicrosoft = \"fr.microsoft\",\n /**\n * French\n */\n FrLucene = \"fr.lucene\",\n /**\n * Galician\n */\n GlLucene = \"gl.lucene\",\n /**\n * German\n */\n DeMicrosoft = \"de.microsoft\",\n /**\n * German\n */\n DeLucene = \"de.lucene\",\n /**\n * Greek\n */\n ElMicrosoft = \"el.microsoft\",\n /**\n * Greek\n */\n ElLucene = \"el.lucene\",\n /**\n * Gujarati\n */\n GuMicrosoft = \"gu.microsoft\",\n /**\n * Hebrew\n */\n HeMicrosoft = \"he.microsoft\",\n /**\n * Hindi\n */\n HiMicrosoft = \"hi.microsoft\",\n /**\n * Hindi\n */\n HiLucene = \"hi.lucene\",\n /**\n * Hungarian\n */\n HuMicrosoft = \"hu.microsoft\",\n /**\n * Hungarian\n */\n HuLucene = \"hu.lucene\",\n /**\n * Icelandic\n */\n IsMicrosoft = \"is.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdMicrosoft = \"id.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdLucene = \"id.lucene\",\n /**\n * Irish\n */\n GaLucene = \"ga.lucene\",\n /**\n * Italian\n */\n ItMicrosoft = \"it.microsoft\",\n /**\n * Italian\n */\n ItLucene = \"it.lucene\",\n /**\n * Japanese\n */\n JaMicrosoft = \"ja.microsoft\",\n /**\n * Japanese\n */\n JaLucene = \"ja.lucene\",\n /**\n * Kannada\n */\n KnMicrosoft = \"kn.microsoft\",\n /**\n * Korean\n */\n KoMicrosoft = \"ko.microsoft\",\n /**\n * Korean\n */\n KoLucene = \"ko.lucene\",\n /**\n * 
Latvian\n */\n LvMicrosoft = \"lv.microsoft\",\n /**\n * Latvian\n */\n LvLucene = \"lv.lucene\",\n /**\n * Lithuanian\n */\n LtMicrosoft = \"lt.microsoft\",\n /**\n * Malayalam\n */\n MlMicrosoft = \"ml.microsoft\",\n /**\n * Malay (Latin)\n */\n MsMicrosoft = \"ms.microsoft\",\n /**\n * Marathi\n */\n MrMicrosoft = \"mr.microsoft\",\n /**\n * Norwegian\n */\n NbMicrosoft = \"nb.microsoft\",\n /**\n * Norwegian\n */\n NoLucene = \"no.lucene\",\n /**\n * Persian\n */\n FaLucene = \"fa.lucene\",\n /**\n * Polish\n */\n PlMicrosoft = \"pl.microsoft\",\n /**\n * Polish\n */\n PlLucene = \"pl.lucene\",\n /**\n * Portuguese (Brazil)\n */\n PtBRMicrosoft = \"pt-BR.microsoft\",\n /**\n * Portuguese (Brazil)\n */\n PtBRLucene = \"pt-BR.lucene\",\n /**\n * Portuguese (Portugal)\n */\n PtPTMicrosoft = \"pt-PT.microsoft\",\n /**\n * Portuguese (Portugal)\n */\n PtPTLucene = \"pt-PT.lucene\",\n /**\n * Punjabi\n */ PaMicrosoft = \"pa.microsoft\",\n /**\n * Romanian\n */\n RoMicrosoft = \"ro.microsoft\",\n /**\n * Romanian\n */\n RoLucene = \"ro.lucene\",\n /**\n * Russian\n */\n RuMicrosoft = \"ru.microsoft\",\n /**\n * Russian\n */\n RuLucene = \"ru.lucene\",\n /**\n * Serbian (Cyrillic)\n */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /**\n * Serbian (Latin)\n */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /**\n * Slovak\n */\n SkMicrosoft = \"sk.microsoft\",\n /**\n * Slovenian\n */\n SlMicrosoft = \"sl.microsoft\",\n /**\n * Spanish\n */\n EsMicrosoft = \"es.microsoft\",\n /**\n * Spanish\n */\n EsLucene = \"es.lucene\",\n /**\n * Swedish\n */\n SvMicrosoft = \"sv.microsoft\",\n /**\n * Swedish\n */\n SvLucene = \"sv.lucene\",\n /**\n * Tamil\n */\n TaMicrosoft = \"ta.microsoft\",\n /**\n * Telugu\n */\n TeMicrosoft = \"te.microsoft\",\n /**\n * Thai\n */\n ThMicrosoft = \"th.microsoft\",\n /**\n * Thai\n */\n ThLucene = \"th.lucene\",\n /**\n * Turkish\n */\n TrMicrosoft = \"tr.microsoft\",\n /**\n * Turkish\n */\n TrLucene = \"tr.lucene\",\n /**\n * Ukrainian\n */\n UkMicrosoft = \"uk.microsoft\",\n /**\n * Urdu\n */\n UrMicrosoft = \"ur.microsoft\",\n /**\n * Vietnamese\n */\n ViMicrosoft = \"vi.microsoft\",\n /**\n * See: https://lucene.apache.org/core/6_6_1/core/org/apache/lucene/analysis/standard/StandardAnalyzer.html\n */\n StandardLucene = \"standard.lucene\",\n /**\n * See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /**\n * Treats the entire content of a field as a single token. 
This is useful for data like zip codes, ids, and some product names.\n */\n Keyword = \"keyword\",\n /**\n * Flexibly separates text into terms via a regular expression pattern.\n */\n Pattern = \"pattern\",\n /**\n * Divides text at non-letters and converts them to lower case.\n */\n Simple = \"simple\",\n /**\n * Divides text at non-letters; Applies the lowercase and stopword token filters.\n */\n Stop = \"stop\",\n /**\n * An analyzer that uses the whitespace tokenizer.\n */\n Whitespace = \"whitespace\"\n}\n\n/**\n * Contains the possible cases for DataChangeDetectionPolicy.\n */\nexport type DataChangeDetectionPolicy =\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\n\n/**\n * Contains the possible cases for SearchIndexerDataIdentity.\n */\nexport type SearchIndexerDataIdentity =\n | SearchIndexerDataNoneIdentity\n | SearchIndexerDataUserAssignedIdentity;\n\n/**\n * Contains the possible cases for DataDeletionDetectionPolicy.\n */\nexport type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;\n\n/**\n * Represents a datasource definition, which can be used to configure an indexer.\n */\nexport interface SearchIndexerDataSourceConnection {\n /**\n * The name of the datasource.\n */\n name: string;\n /**\n * The description of the datasource.\n */\n description?: string;\n /**\n * The type of the datasource. Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob',\n * 'AzureTable', 'MySql', 'AdlsGen2'\n */\n type: SearchIndexerDataSourceType;\n /**\n * The connection string for the datasource.\n */\n connectionString?: string;\n /**\n * The data container for the datasource.\n */\n container: SearchIndexerDataContainer;\n /**\n * An explicit managed identity to use for this datasource. If not specified and the connection\n * string is a managed identity, the system-assigned managed identity is used. If not specified,\n * the value remains unchanged. If \"none\" is specified, the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n /**\n * The data change detection policy for the datasource.\n */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicy;\n /**\n * The data deletion detection policy for the datasource.\n */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy;\n /**\n * The ETag of the DataSource.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your datasource definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your data source definition in\n * Azure Cognitive Search. Once you have encrypted your data source definition, it will always\n * remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null.\n * You can change this property as needed if you want to rotate your encryption key; Your\n * datasource definition will be unaffected. Encryption with customer-managed keys is not\n * available for free search services, and is only available for paid services created on or\n * after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n// END manually modified generated interfaces\n"]}
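The embedded source above documents, among others, the SearchIndex and SearchResourceEncryptionKey shapes. As a point of reference only (not part of this diff), here is a minimal sketch of how they compose; the endpoint, admin key, vault values, and index name are all placeholders, and the field property names (`key`, `searchable`, `analyzerName`) follow the package's public SimpleField model:

// Hypothetical usage sketch: creating an index whose definition is encrypted
// with a customer-managed Azure Key Vault key, per the interfaces above.
import { SearchIndexClient, AzureKeyCredential, KnownAnalyzerNames } from "@azure/search-documents";
import type { SearchIndex } from "@azure/search-documents";

const indexClient = new SearchIndexClient(
  "https://<service-name>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<admin-key>") // placeholder admin key
);

const index: SearchIndex = {
  name: "hotels",
  fields: [
    { name: "id", type: "Edm.String", key: true },
    {
      name: "description",
      type: "Edm.String",
      searchable: true,
      analyzerName: KnownAnalyzerNames.EnLucene
    }
  ],
  // Customer-managed encryption: per the docs above, available only on paid
  // services created on or after January 1, 2019, and never resettable to null.
  encryptionKey: {
    keyName: "<key-name>",
    keyVersion: "<key-version>",
    vaultUrl: "https://my-keyvault-name.vault.azure.net"
  }
};

indexClient.createIndex(index).catch(console.error);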
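Likewise, the SearchIndexerDataSourceConnection and SearchIndexer shapes above wire together roughly as follows. This is a hypothetical sketch: the connection string and resource names are placeholders, and the lowercase "azureblob" value is one of the extensible datasource type strings corresponding to the documented 'AzureBlob' case:

// Hypothetical wiring of a blob datasource to an indexer that populates an
// existing index, using the interfaces documented above.
import { SearchIndexerClient, AzureKeyCredential } from "@azure/search-documents";

const indexerClient = new SearchIndexerClient(
  "https://<service-name>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<admin-key>") // placeholder admin key
);

async function wireUpIndexer(): Promise<void> {
  // A blob datasource; `container` names the blob container to read from.
  await indexerClient.createDataSourceConnection({
    name: "hotels-blob-datasource",
    type: "azureblob",
    connectionString: "<storage-connection-string>",
    container: { name: "hotel-documents" }
  });

  // An indexer that reads from the datasource and writes into the target index.
  await indexerClient.createIndexer({
    name: "hotels-indexer",
    dataSourceName: "hotels-blob-datasource",
    targetIndexName: "hotels",
    isDisabled: false // documented default is false
  });
}

wireUpIndexer().catch(console.error);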
@@ -289,7 +289,8 @@ export function generatedIndexToPublicIndex(generatedIndex) {
         normalizers: generatedIndex.normalizers,
         scoringProfiles: generatedIndex.scoringProfiles,
         fields: convertFieldsToPublic(generatedIndex.fields),
-        similarity: convertSimilarityToPublic(generatedIndex.similarity)
+        similarity: convertSimilarityToPublic(generatedIndex.similarity),
+        semanticSettings: generatedIndex.semanticSettings
     };
 }
 export function generatedSearchResultToPublicSearchResult(results) {
@@ -338,7 +339,8 @@ export function publicIndexToGeneratedIndex(index) {
         analyzers: convertAnalyzersToGenerated(index.analyzers),
         tokenizers: convertTokenizersToGenerated(index.tokenizers),
         fields: convertFieldsToGenerated(index.fields),
-        similarity: convertSimilarityToGenerated(index.similarity)
+        similarity: convertSimilarityToGenerated(index.similarity),
+        semanticSettings: index.semanticSettings
     };
 }
 export function generatedSkillsetToPublicSkillset(generatedSkillset) {
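Both hunks make the same change: the internal converters between the public SearchIndex model and the generated REST model now pass semanticSettings through in both directions, matching the new `semanticSettings?: SemanticSettings` property shown in the sourcemap above. A self-contained sketch of the resulting round trip follows; the converters themselves are internal to the package, so local stand-ins are used here, and the settings payload is illustrative only:

// Minimal round-trip sketch of the pass-through these hunks add. The real
// converters live in the package's serviceUtils module and handle many more
// properties; identity functions stand in for the similarity helpers.
interface PublicIndex { similarity?: unknown; semanticSettings?: unknown; }
interface GeneratedIndex { similarity?: unknown; semanticSettings?: unknown; }

const convertSimilarityToGenerated = (s?: unknown): unknown => s; // stand-in
const convertSimilarityToPublic = (s?: unknown): unknown => s;    // stand-in

function publicIndexToGeneratedIndex(index: PublicIndex): GeneratedIndex {
  return {
    similarity: convertSimilarityToGenerated(index.similarity),
    semanticSettings: index.semanticSettings // new in this version; previously dropped
  };
}

function generatedIndexToPublicIndex(generatedIndex: GeneratedIndex): PublicIndex {
  return {
    similarity: convertSimilarityToPublic(generatedIndex.similarity),
    semanticSettings: generatedIndex.semanticSettings // new in this version; previously dropped
  };
}

// semanticSettings now survives the full round trip:
const original: PublicIndex = { semanticSettings: { configurations: [] } }; // illustrative payload
const back = generatedIndexToPublicIndex(publicIndexToGeneratedIndex(original));
console.log(back.semanticSettings); // -> { configurations: [] }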