@azure/search-documents 11.3.0-beta.6 → 11.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/CHANGELOG.md +28 -0
  2. package/README.md +73 -35
  3. package/dist/index.js +839 -2940
  4. package/dist/index.js.map +1 -1
  5. package/dist-esm/src/constants.js +1 -1
  6. package/dist-esm/src/constants.js.map +1 -1
  7. package/dist-esm/src/generated/data/index.js +0 -1
  8. package/dist-esm/src/generated/data/index.js.map +1 -1
  9. package/dist-esm/src/generated/data/models/index.js +1 -202
  10. package/dist-esm/src/generated/data/models/index.js.map +1 -1
  11. package/dist-esm/src/generated/data/models/mappers.js +1 -138
  12. package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
  13. package/dist-esm/src/generated/data/models/parameters.js +13 -72
  14. package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
  15. package/dist-esm/src/generated/data/operations/documents.js +51 -23
  16. package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
  17. package/dist-esm/src/generated/data/searchClient.js +2 -2
  18. package/dist-esm/src/generated/data/searchClient.js.map +1 -1
  19. package/dist-esm/src/generated/data/searchClientContext.js +11 -13
  20. package/dist-esm/src/generated/data/searchClientContext.js.map +1 -1
  21. package/dist-esm/src/generated/service/index.js +0 -1
  22. package/dist-esm/src/generated/service/index.js.map +1 -1
  23. package/dist-esm/src/generated/service/models/index.js +1 -904
  24. package/dist-esm/src/generated/service/models/index.js.map +1 -1
  25. package/dist-esm/src/generated/service/models/mappers.js +11 -674
  26. package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
  27. package/dist-esm/src/generated/service/models/parameters.js +1 -37
  28. package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
  29. package/dist-esm/src/generated/service/operations/dataSources.js +30 -13
  30. package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
  31. package/dist-esm/src/generated/service/operations/indexers.js +45 -45
  32. package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
  33. package/dist-esm/src/generated/service/operations/indexes.js +40 -11
  34. package/dist-esm/src/generated/service/operations/indexes.js.map +1 -1
  35. package/dist-esm/src/generated/service/operations/skillsets.js +30 -43
  36. package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
  37. package/dist-esm/src/generated/service/operations/synonymMaps.js +29 -9
  38. package/dist-esm/src/generated/service/operations/synonymMaps.js.map +1 -1
  39. package/dist-esm/src/generated/service/searchServiceClient.js +12 -9
  40. package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
  41. package/dist-esm/src/generated/service/searchServiceClientContext.js +11 -13
  42. package/dist-esm/src/generated/service/searchServiceClientContext.js.map +1 -1
  43. package/dist-esm/src/geographyPoint.js +1 -1
  44. package/dist-esm/src/geographyPoint.js.map +1 -1
  45. package/dist-esm/src/index.js +3 -4
  46. package/dist-esm/src/index.js.map +1 -1
  47. package/dist-esm/src/indexDocumentsBatch.js +1 -1
  48. package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
  49. package/dist-esm/src/indexModels.js.map +1 -1
  50. package/dist-esm/src/odataMetadataPolicy.js +15 -7
  51. package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
  52. package/dist-esm/src/searchApiKeyCredentialPolicy.js +21 -8
  53. package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
  54. package/dist-esm/src/searchAudience.js +21 -0
  55. package/dist-esm/src/searchAudience.js.map +1 -0
  56. package/dist-esm/src/searchClient.js +56 -77
  57. package/dist-esm/src/searchClient.js.map +1 -1
  58. package/dist-esm/src/searchIndexClient.js +57 -70
  59. package/dist-esm/src/searchIndexClient.js.map +1 -1
  60. package/dist-esm/src/searchIndexerClient.js +65 -119
  61. package/dist-esm/src/searchIndexerClient.js.map +1 -1
  62. package/dist-esm/src/searchIndexingBufferedSender.js +11 -11
  63. package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
  64. package/dist-esm/src/serialization.js +5 -5
  65. package/dist-esm/src/serialization.js.map +1 -1
  66. package/dist-esm/src/serviceModels.js.map +1 -1
  67. package/dist-esm/src/serviceUtils.js +31 -69
  68. package/dist-esm/src/serviceUtils.js.map +1 -1
  69. package/dist-esm/src/synonymMapHelper.js +2 -6
  70. package/dist-esm/src/synonymMapHelper.js.map +1 -1
  71. package/dist-esm/src/tracing.js +1 -1
  72. package/dist-esm/src/tracing.js.map +1 -1
  73. package/package.json +38 -38
  74. package/types/search-documents.d.ts +188 -1049
  75. package/dist-esm/src/generated/data/operationsInterfaces/documents.js +0 -9
  76. package/dist-esm/src/generated/data/operationsInterfaces/documents.js.map +0 -1
  77. package/dist-esm/src/generated/data/operationsInterfaces/index.js +0 -9
  78. package/dist-esm/src/generated/data/operationsInterfaces/index.js.map +0 -1
  79. package/dist-esm/src/generated/service/operationsInterfaces/dataSources.js +0 -9
  80. package/dist-esm/src/generated/service/operationsInterfaces/dataSources.js.map +0 -1
  81. package/dist-esm/src/generated/service/operationsInterfaces/index.js +0 -13
  82. package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +0 -1
  83. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js +0 -9
  84. package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +0 -1
  85. package/dist-esm/src/generated/service/operationsInterfaces/indexes.js +0 -9
  86. package/dist-esm/src/generated/service/operationsInterfaces/indexes.js.map +0 -1
  87. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js +0 -9
  88. package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +0 -1
  89. package/dist-esm/src/generated/service/operationsInterfaces/synonymMaps.js +0 -9
  90. package/dist-esm/src/generated/service/operationsInterfaces/synonymMaps.js.map +0 -1
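The README.md entry above reflects the updated usage documentation shipped with the GA release. As a rough illustration of the package's core query flow (the endpoint, index name, and API key values below are placeholders, not taken from this diff):

```ts
import { SearchClient, AzureKeyCredential } from "@azure/search-documents";

// Placeholder service details; substitute your own search service values.
const endpoint = "https://<service-name>.search.windows.net";
const indexName = "hotels";
const apiKey = "<admin-or-query-key>";

async function main(): Promise<void> {
  const client = new SearchClient<{ hotelName: string }>(
    endpoint,
    indexName,
    new AzureKeyCredential(apiKey)
  );

  // Run a simple full-text query and page through the results.
  const searchResults = await client.search("wifi", { top: 5 });
  for await (const result of searchResults.results) {
    console.log(result.document.hotelName);
  }
}

main().catch(console.error);
```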
package/dist-esm/src/serviceModels.js.map
@@ -1 +1 @@
[Single-line source map for serviceModels.js, regenerated for this release. The minified mappings and the embedded serviceModels.ts source (service operation option types, the SearchField union of SimpleField and ComplexField, SearchIndex, SearchIndexer, SearchIndexerSkillset, SynonymMap, SearchResourceEncryptionKey, and the KnownTokenizerNames, KnownTokenFilterNames, KnownCharFilterNames, and KnownAnalyzerNames enums) are truncated in this capture.]
* Latvian\n */\n LvMicrosoft = \"lv.microsoft\",\n /**\n * Latvian\n */\n LvLucene = \"lv.lucene\",\n /**\n * Lithuanian\n */\n LtMicrosoft = \"lt.microsoft\",\n /**\n * Malayalam\n */\n MlMicrosoft = \"ml.microsoft\",\n /**\n * Malay (Latin)\n */\n MsMicrosoft = \"ms.microsoft\",\n /**\n * Marathi\n */\n MrMicrosoft = \"mr.microsoft\",\n /**\n * Norwegian\n */\n NbMicrosoft = \"nb.microsoft\",\n /**\n * Norwegian\n */\n NoLucene = \"no.lucene\",\n /**\n * Persian\n */\n FaLucene = \"fa.lucene\",\n /**\n * Polish\n */\n PlMicrosoft = \"pl.microsoft\",\n /**\n * Polish\n */\n PlLucene = \"pl.lucene\",\n /**\n * Portuguese (Brazil)\n */\n PtBRMicrosoft = \"pt-BR.microsoft\",\n /**\n * Portuguese (Brazil)\n */\n PtBRLucene = \"pt-BR.lucene\",\n /**\n * Portuguese (Portugal)\n */\n PtPTMicrosoft = \"pt-PT.microsoft\",\n /**\n * Portuguese (Portugal)\n */\n PtPTLucene = \"pt-PT.lucene\",\n /**\n * Punjabi\n */ PaMicrosoft = \"pa.microsoft\",\n /**\n * Romanian\n */\n RoMicrosoft = \"ro.microsoft\",\n /**\n * Romanian\n */\n RoLucene = \"ro.lucene\",\n /**\n * Russian\n */\n RuMicrosoft = \"ru.microsoft\",\n /**\n * Russian\n */\n RuLucene = \"ru.lucene\",\n /**\n * Serbian (Cyrillic)\n */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /**\n * Serbian (Latin)\n */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /**\n * Slovak\n */\n SkMicrosoft = \"sk.microsoft\",\n /**\n * Slovenian\n */\n SlMicrosoft = \"sl.microsoft\",\n /**\n * Spanish\n */\n EsMicrosoft = \"es.microsoft\",\n /**\n * Spanish\n */\n EsLucene = \"es.lucene\",\n /**\n * Swedish\n */\n SvMicrosoft = \"sv.microsoft\",\n /**\n * Swedish\n */\n SvLucene = \"sv.lucene\",\n /**\n * Tamil\n */\n TaMicrosoft = \"ta.microsoft\",\n /**\n * Telugu\n */\n TeMicrosoft = \"te.microsoft\",\n /**\n * Thai\n */\n ThMicrosoft = \"th.microsoft\",\n /**\n * Thai\n */\n ThLucene = \"th.lucene\",\n /**\n * Turkish\n */\n TrMicrosoft = \"tr.microsoft\",\n /**\n * Turkish\n */\n TrLucene = \"tr.lucene\",\n /**\n * Ukrainian\n */\n UkMicrosoft = \"uk.microsoft\",\n /**\n * Urdu\n */\n UrMicrosoft = \"ur.microsoft\",\n /**\n * Vietnamese\n */\n ViMicrosoft = \"vi.microsoft\",\n /**\n * See: https://lucene.apache.org/core/6_6_1/core/org/apache/lucene/analysis/standard/StandardAnalyzer.html\n */\n StandardLucene = \"standard.lucene\",\n /**\n * See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /**\n * Treats the entire content of a field as a single token. 
This is useful for data like zip codes, ids, and some product names.\n */\n Keyword = \"keyword\",\n /**\n * Flexibly separates text into terms via a regular expression pattern.\n */\n Pattern = \"pattern\",\n /**\n * Divides text at non-letters and converts them to lower case.\n */\n Simple = \"simple\",\n /**\n * Divides text at non-letters; Applies the lowercase and stopword token filters.\n */\n Stop = \"stop\",\n /**\n * An analyzer that uses the whitespace tokenizer.\n */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Contains the possible cases for DataChangeDetectionPolicy.\n */\nexport type DataChangeDetectionPolicy =\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\n\n/**\n * Contains the possible cases for SearchIndexerDataIdentity.\n */\nexport type SearchIndexerDataIdentity =\n | SearchIndexerDataNoneIdentity\n | SearchIndexerDataUserAssignedIdentity;\n\n/**\n * Contains the possible cases for DataDeletionDetectionPolicy.\n */\nexport type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;\n\n/**\n * Represents a datasource definition, which can be used to configure an indexer.\n */\nexport interface SearchIndexerDataSourceConnection {\n /**\n * The name of the datasource.\n */\n name: string;\n /**\n * The description of the datasource.\n */\n description?: string;\n /**\n * The type of the datasource. Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob',\n * 'AzureTable', 'MySql', 'AdlsGen2'\n */\n type: SearchIndexerDataSourceType;\n /**\n * The connection string for the datasource.\n */\n connectionString?: string;\n /**\n * The data container for the datasource.\n */\n container: SearchIndexerDataContainer;\n /**\n * An explicit managed identity to use for this datasource. If not specified and the connection\n * string is a managed identity, the system-assigned managed identity is used. If not specified,\n * the value remains unchanged. If \"none\" is specified, the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n /**\n * The data change detection policy for the datasource.\n */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicy;\n /**\n * The data deletion detection policy for the datasource.\n */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy;\n /**\n * The ETag of the DataSource.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your datasource definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your data source definition in\n * Azure Cognitive Search. Once you have encrypted your data source definition, it will always\n * remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null.\n * You can change this property as needed if you want to rotate your encryption key; Your\n * datasource definition will be unaffected. Encryption with customer-managed keys is not\n * available for free search services, and is only available for paid services created on or\n * after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n// END manually modified generated interfaces\n"]}
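The removed source-map entry above carries the full `sourcesContent` for `serviceModels.ts`, ending with the `SearchIndexerDataSourceConnection` interface. For readers skimming the diff, here is a minimal sketch of how an object of that shape is typically passed to `SearchIndexerClient.createDataSourceConnection`. The service endpoint, admin key, datasource and container names, connection string, and high-water-mark column are all placeholders, not values taken from this package.

```typescript
import {
  AzureKeyCredential,
  SearchIndexerClient,
  SearchIndexerDataSourceConnection
} from "@azure/search-documents";

// Placeholder endpoint and admin key -- substitute real service values.
const client = new SearchIndexerClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<admin-api-key>")
);

// A datasource definition matching the SearchIndexerDataSourceConnection
// interface shown in the diff above. Names and the connection string are
// illustrative only.
const dataSource: SearchIndexerDataSourceConnection = {
  name: "hotels-blob-datasource",
  description: "Blob container holding hotel documents",
  type: "azureblob",
  connectionString: "<storage-connection-string>",
  container: { name: "hotel-documents" },
  dataChangeDetectionPolicy: {
    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
    highWaterMarkColumnName: "metadata_storage_last_modified"
  }
};

async function main(): Promise<void> {
  // createDataSourceConnection uploads the definition; the returned object
  // reflects the service-side state of the datasource.
  const created = await client.createDataSourceConnection(dataSource);
  console.log(`Created data source connection: ${created.name}`);
}

main().catch(console.error);
```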
+ {"version":3,"file":"serviceModels.js","sourceRoot":"","sources":["../../src/serviceModels.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAyyBlC,MAAM,UAAU,cAAc,CAAC,KAAkB;IAC/C,OAAO,KAAK,CAAC,IAAI,KAAK,iBAAiB,IAAI,KAAK,CAAC,IAAI,KAAK,6BAA6B,CAAC;AAC1F,CAAC;AA0SD;;;GAGG;AACH,MAAM,CAAN,IAAY,mBAmEX;AAnED,WAAY,mBAAmB;IAC7B;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;;OAGG;IACH,6CAAsB,CAAA;IACtB;;;OAGG;IACH,wCAAiB,CAAA;IACjB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;OAEG;IACH,wDAAwD;IACxD,kFAA2D,CAAA;IAC3D;;OAEG;IACH,wDAAwD;IACxD,mGAA4E,CAAA;IAC5E;;;OAGG;IACH,sCAAe,CAAA;IACf;;;OAGG;IACH,0DAAmC,CAAA;IACnC;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;;OAIG;IACH,+CAAwB,CAAA;IACxB;;;OAGG;IACH,oDAA6B,CAAA;IAC7B;;;OAGG;IACH,gDAAyB,CAAA;AAC3B,CAAC,EAnEW,mBAAmB,KAAnB,mBAAmB,QAmE9B;AAED;;;GAGG;AACH,MAAM,CAAN,IAAY,qBAiLX;AAjLD,WAAY,qBAAqB;IAC/B;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,kDAAyB,CAAA;IACzB;;;;;OAKG;IACH,sDAA6B,CAAA;IAC7B;;;OAGG;IACH,iDAAwB,CAAA;IACxB;;;;OAIG;IACH,+CAAsB,CAAA;IACtB;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,oDAA2B,CAAA;IAC3B;;;;OAIG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,yDAAgC,CAAA;IAChC;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,2CAAkB,CAAA;IAClB;;;OAGG;IACH,uEAA8C,CAAA;IAC9C;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,iFAAwD,CAAA;IACxD;;;;OAIG;IACH,kFAAyD,CAAA;IACzD;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,sCAAa,CAAA;IACb;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;OAEG;IACH,yDAAgC,CAAA;AAClC,CAAC,EAjLW,qBAAqB,KAArB,qBAAqB,QAiLhC;AAED;;;GAGG;AACH,MAAM,CAAN,IAAY,oBAMX;AAND,WAAY,oBAAoB;IAC9B;;;OAGG;IACH,gDAAwB,CAAA;AAC1B,CAAC,EANW,oBAAoB,KAApB,oBAAoB,QAM/B;AAED;;;;GAIG;AACH,MAAM,CAAN,IAAY,kBAoXX;AApXD,WAAY,kBAAkB;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4
CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG,CAAC,kDAA4B,CAAA;IAChC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,mEAA6C,CAAA;IAC7C;;OAEG;IACH,6DAAuC,CAAA;IACvC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,wDAAkC,CAAA;IAClC;;OAEG;IACH,gFAA0D,CAAA;IAC1D;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,uCAAiB,CAAA;IACjB;;OAEG;IACH,mCAAa,CAAA;IACb;;OAEG;IACH,+CAAyB,CAAA;AAC3B,CAAC,EApXW,kBAAkB,KAAlB,kBAAkB,QAoX7B;AAgED,6CAA6C","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport { OperationOptions } from \"@azure/core-http\";\nimport {\n LuceneStandardAnalyzer,\n StopAnalyzer,\n CorsOptions,\n Suggester as SearchSuggester,\n ClassicTokenizer,\n EdgeNGramTokenizer,\n MicrosoftLanguageTokenizer,\n MicrosoftLanguageStemmingTokenizer,\n NGramTokenizer,\n PathHierarchyTokenizerV2 as PathHierarchyTokenizer,\n UaxUrlEmailTokenizer,\n AsciiFoldingTokenFilter,\n CjkBigramTokenFilter,\n CommonGramTokenFilter,\n DictionaryDecompounderTokenFilter,\n LengthTokenFilter,\n ElisionTokenFilter,\n KeepTokenFilter,\n KeywordMarkerTokenFilter,\n LimitTokenFilter,\n PatternCaptureTokenFilter,\n PatternReplaceTokenFilter,\n PhoneticTokenFilter,\n ShingleTokenFilter,\n SnowballTokenFilter,\n StemmerTokenFilter,\n StemmerOverrideTokenFilter,\n StopwordsTokenFilter,\n SynonymTokenFilter,\n TruncateTokenFilter,\n UniqueTokenFilter,\n WordDelimiterTokenFilter,\n MappingCharFilter,\n PatternReplaceCharFilter,\n DistanceScoringFunction,\n FreshnessScoringFunction,\n MagnitudeScoringFunction,\n TagScoringFunction,\n TextWeights,\n ScoringFunctionAggregation,\n RegexFlags,\n ConditionalSkill,\n KeyPhraseExtractionSkill,\n OcrSkill,\n ImageAnalysisSkill,\n LanguageDetectionSkill,\n ShaperSkill,\n MergeSkill,\n EntityRecognitionSkill,\n SentimentSkill,\n SplitSkill,\n CustomEntityLookupSkill,\n DocumentExtractionSkill,\n TextTranslationSkill,\n WebApiSkill,\n DefaultCognitiveServicesAccount,\n CognitiveServicesAccountKey,\n HighWaterMarkChangeDetectionPolicy,\n SqlIntegratedChangeTrackingPolicy,\n SoftDeleteColumnDeletionDetectionPolicy,\n SearchIndexerDataSourceType,\n SearchIndexerDataContainer,\n LexicalAnalyzerName,\n ClassicSimilarity,\n BM25Similarity,\n EdgeNGramTokenFilterSide,\n ServiceCounters,\n ServiceLimits,\n FieldMapping,\n IndexingParameters,\n IndexingSchedule,\n SearchIndexerKnowledgeStore\n} from \"./generated/service/models\";\n\nimport { PagedAsyncIterableIterator } from \"@azure/core-paging\";\n\n/**\n * Options for a list skillsets operation.\n */\nexport type ListSkillsetsOptions = OperationOptions;\n\n/**\n * Options for a list synonymMaps operation.\n */\nexport type ListSynonymMapsOptions = 
OperationOptions;\n\n/**\n * Options for a list indexes operation.\n */\nexport type ListIndexesOptions = OperationOptions;\n\n/**\n * Options for a list indexers operation.\n */\nexport type ListIndexersOptions = OperationOptions;\n\n/**\n * Options for a list data sources operation.\n */\nexport type ListDataSourceConnectionsOptions = OperationOptions;\n\n/**\n * Options for get index operation.\n */\nexport type GetIndexOptions = OperationOptions;\n\n/**\n * Options for get skillset operation.\n */\nexport type GetSkillSetOptions = OperationOptions;\n\n/**\n * Options for get synonymmaps operation.\n */\nexport type GetSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for get indexer operation.\n */\nexport type GetIndexerOptions = OperationOptions;\n\n/**\n * Options for get datasource operation.\n */\nexport type GetDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for get index statistics operation.\n */\nexport type GetIndexStatisticsOptions = OperationOptions;\n\n/**\n * Statistics for a given index. Statistics are collected periodically and are not guaranteed to\n * always be up-to-date.\n */\nexport interface SearchIndexStatistics {\n /**\n * The number of documents in the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly storageSize: number;\n}\n\n/**\n * Response from a get service statistics request. If successful, it includes service level\n * counters and limits.\n */\nexport interface SearchServiceStatistics {\n /**\n * Service level resource counters.\n */\n counters: ServiceCounters;\n /**\n * Service level general limits.\n */\n limits: ServiceLimits;\n}\n\n/**\n * Options for get service statistics operation.\n */\nexport type GetServiceStatisticsOptions = OperationOptions;\n\n/**\n * Options for get indexer status operation.\n */\nexport type GetIndexerStatusOptions = OperationOptions;\n\n/**\n * Options for reset indexer operation.\n */\nexport type ResetIndexerOptions = OperationOptions;\n\n/**\n * Options for run indexer operation.\n */\nexport type RunIndexerOptions = OperationOptions;\n\n/**\n * Options for create index operation.\n */\nexport type CreateIndexOptions = OperationOptions;\n\n/**\n * Options for create skillset operation.\n */\nexport type CreateSkillsetOptions = OperationOptions;\n\n/**\n * Options for create synonymmap operation.\n */\nexport type CreateSynonymMapOptions = OperationOptions;\n\n/**\n * Options for create indexer operation.\n */\nexport type CreateIndexerOptions = OperationOptions;\n\n/**\n * Options for create datasource operation.\n */\nexport type CreateDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for create/update index operation.\n */\nexport interface CreateOrUpdateIndexOptions extends OperationOptions {\n /**\n * Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by\n * taking the index offline for at least a few seconds. This temporarily causes indexing and\n * query requests to fail. 
Performance and write availability of the index can be impaired for\n * several minutes after the index is updated, or longer for very large indexes.\n */\n allowIndexDowntime?: boolean;\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update skillset operation.\n */\nexport interface CreateOrUpdateSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update synonymmap operation.\n */\nexport interface CreateOrUpdateSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update indexer operation.\n */\nexport interface CreateorUpdateIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update datasource operation.\n */\nexport interface CreateorUpdateDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete index operation.\n */\nexport interface DeleteIndexOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete skillset operaion.\n */\nexport interface DeleteSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete synonymmap operation.\n */\nexport interface DeleteSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete indexer operation.\n */\nexport interface DeleteIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete datasource operation.\n */\nexport interface DeleteDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Specifies some text and analysis components used to break that text into tokens.\n */\nexport interface AnalyzeRequest {\n /**\n * The text to break into tokens.\n */\n text: string;\n /**\n * The name of the analyzer to use to break the given text. If this parameter is not specified,\n * you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. KnownAnalyzerNames is an enum containing known values.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n analyzerName?: string;\n /**\n * The name of the tokenizer to use to break the given text. If this parameter is not specified,\n * you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. KnownTokenizerNames is an enum containing known values.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n tokenizerName?: string;\n /**\n * An optional list of token filters to use when breaking the given text. 
This parameter can only\n * be set when using the tokenizer parameter.\n */\n tokenFilters?: string[];\n /**\n * An optional list of character filters to use when breaking the given text. This parameter can\n * only be set when using the tokenizer parameter.\n */\n charFilters?: string[];\n}\n\n/**\n * Options for analyze text operation.\n */\nexport type AnalyzeTextOptions = OperationOptions & AnalyzeRequest;\n\n// BEGIN manually modified generated interfaces\n//\n// This section is for places where we have to manually fix issues\n// with interfaces from the generated code.\n// One issue is that unions of discriminated types generated with\n// their abstract base class as a member.\n\n/**\n * Flexibly separates text into terms via a regular expression pattern. This analyzer is\n * implemented using Apache Lucene.\n */\nexport interface PatternAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * A value indicating whether terms should be lower-cased. Default is true. Default value: true.\n */\n lowerCaseTerms?: boolean;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * A list of stopwords.\n */\n stopwords?: string[];\n}\n\n/**\n * Allows you to take control over the process of converting text into indexable/searchable tokens.\n * It's a user-defined configuration consisting of a single predefined tokenizer and one or more\n * filters. The tokenizer is responsible for breaking text into tokens, and the filters for\n * modifying tokens emitted by the tokenizer.\n */\nexport interface CustomAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.CustomAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as\n * breaking a sentence into words. KnownTokenizerNames is an enum containing known values.\n */\n tokenizerName: string;\n /**\n * A list of token filters used to filter out or modify the tokens generated by a tokenizer. For\n * example, you can specify a lowercase filter that converts all characters to lowercase. The\n * filters are run in the order in which they are listed.\n */\n tokenFilters?: string[];\n /**\n * A list of character filters used to prepare input text before it is processed by the\n * tokenizer. For instance, they can replace certain characters or symbols. 
The filters are run\n * in the order in which they are listed.\n */\n charFilters?: string[];\n}\n\n/**\n * Contains the possible cases for Analyzer.\n */\nexport type LexicalAnalyzer =\n | CustomAnalyzer\n | PatternAnalyzer\n | LuceneStandardAnalyzer\n | StopAnalyzer;\n\n/**\n * Contains the possible cases for Skill.\n */\nexport type SearchIndexerSkill =\n | ConditionalSkill\n | KeyPhraseExtractionSkill\n | OcrSkill\n | ImageAnalysisSkill\n | LanguageDetectionSkill\n | ShaperSkill\n | MergeSkill\n | EntityRecognitionSkill\n | SentimentSkill\n | SplitSkill\n | CustomEntityLookupSkill\n | TextTranslationSkill\n | DocumentExtractionSkill\n | WebApiSkill;\n\n/**\n * Contains the possible cases for CognitiveServicesAccount.\n */\nexport type CognitiveServicesAccount =\n | DefaultCognitiveServicesAccount\n | CognitiveServicesAccountKey;\n/**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is\n * implemented using Apache Lucene.\n */\nexport interface PatternTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * The zero-based ordinal of the matching group in the regular expression pattern to extract into\n * tokens. Use -1 if you want to use the entire pattern to split the input into tokens,\n * irrespective of matching groups. Default is -1. Default value: -1.\n */\n group?: number;\n}\n/**\n * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using\n * Apache Lucene.\n */\nexport interface LuceneStandardTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.StandardTokenizerV2\"\n | \"#Microsoft.Azure.Search.StandardTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. Default value: 255.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * This token filter is implemented using Apache Lucene.\n */\nexport interface EdgeNGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. 
Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n /**\n * Specifies which side of the input the n-gram should be generated from. Default is \"front\".\n * Possible values include: 'Front', 'Back'\n */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/**\n * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.\n */\nexport interface KeywordTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.KeywordTokenizerV2\"\n | \"#Microsoft.Azure.Search.KeywordTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 256. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. Default value: 256.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Contains the possible cases for Tokenizer.\n */\nexport type LexicalTokenizer =\n | ClassicTokenizer\n | EdgeNGramTokenizer\n | KeywordTokenizer\n | MicrosoftLanguageTokenizer\n | MicrosoftLanguageStemmingTokenizer\n | NGramTokenizer\n | PathHierarchyTokenizer\n | PatternTokenizer\n | LuceneStandardTokenizer\n | UaxUrlEmailTokenizer;\n\n/**\n * Contains the possible cases for Similarity.\n */\nexport type SimilarityAlgorithm = ClassicSimilarity | BM25Similarity;\n\n/**\n * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.\n */\nexport interface NGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.NGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.NGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. 
Default value: 2.\n */\n maxGram?: number;\n}\n\n/**\n * Contains the possible cases for TokenFilter.\n */\nexport type TokenFilter =\n | AsciiFoldingTokenFilter\n | CjkBigramTokenFilter\n | CommonGramTokenFilter\n | DictionaryDecompounderTokenFilter\n | EdgeNGramTokenFilter\n | ElisionTokenFilter\n | KeepTokenFilter\n | KeywordMarkerTokenFilter\n | LengthTokenFilter\n | LimitTokenFilter\n | NGramTokenFilter\n | PatternCaptureTokenFilter\n | PatternReplaceTokenFilter\n | PhoneticTokenFilter\n | ShingleTokenFilter\n | SnowballTokenFilter\n | StemmerTokenFilter\n | StemmerOverrideTokenFilter\n | StopwordsTokenFilter\n | SynonymTokenFilter\n | TruncateTokenFilter\n | UniqueTokenFilter\n | WordDelimiterTokenFilter;\n\n/**\n * Contains the possible cases for CharFilter.\n */\nexport type CharFilter = MappingCharFilter | PatternReplaceCharFilter;\n\n/**\n * Contains the possible cases for ScoringFunction.\n */\nexport type ScoringFunction =\n | DistanceScoringFunction\n | FreshnessScoringFunction\n | MagnitudeScoringFunction\n | TagScoringFunction;\n\n/**\n * Defines values for SearchFieldDataType.\n * Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean',\n * 'Edm.DateTimeOffset', 'Edm.GeographyPoint', 'Collection(Edm.String)',\n * 'Collection(Edm.Int32)', 'Collection(Edm.Int64)', 'Collection(Edm.Double)',\n * 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)'\n * @readonly\n */\nexport type SearchFieldDataType =\n | \"Edm.String\"\n | \"Edm.Int32\"\n | \"Edm.Int64\"\n | \"Edm.Double\"\n | \"Edm.Boolean\"\n | \"Edm.DateTimeOffset\"\n | \"Edm.GeographyPoint\"\n | \"Collection(Edm.String)\"\n | \"Collection(Edm.Int32)\"\n | \"Collection(Edm.Int64)\"\n | \"Collection(Edm.Double)\"\n | \"Collection(Edm.Boolean)\"\n | \"Collection(Edm.DateTimeOffset)\"\n | \"Collection(Edm.GeographyPoint)\";\n\n/**\n * Defines values for ComplexDataType.\n * Possible values include: 'Edm.ComplexType', 'Collection(Edm.ComplexType)'\n * @readonly\n */\nexport type ComplexDataType = \"Edm.ComplexType\" | \"Collection(Edm.ComplexType)\";\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport type SearchField = SimpleField | ComplexField;\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface SimpleField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field. Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64',\n * 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', 'Edm.GeographyPoint'\n * 'Collection(Edm.String)', 'Collection(Edm.Int32)', 'Collection(Edm.Int64)',\n * 'Collection(Edm.Double)', 'Collection(Edm.Boolean)', 'Collection(Edm.DateTimeOffset)',\n * 'Collection(Edm.GeographyPoint)'\n */\n type: SearchFieldDataType;\n /**\n * A value indicating whether the field uniquely identifies documents in the index. Exactly one\n * top-level field in each index must be chosen as the key field and it must be of type\n * Edm.String. Key fields can be used to look up documents directly and update or delete specific\n * documents. Default is false.\n */\n key?: boolean;\n /**\n * A value indicating whether the field can be returned in a search result. 
You can enable this\n * option if you want to use a field (for example, margin) as a filter, sorting, or scoring\n * mechanism but do not want the field to be visible to the end user. This property must be false\n * for key fields. This property can be changed on existing fields.\n * Disabling this property does not cause any increase in index storage requirements.\n * Default is false.\n */\n hidden?: boolean;\n /**\n * A value indicating whether the field is full-text searchable. This means it will undergo\n * analysis such as word-breaking during indexing. If you set a searchable field to a value like\n * \"sunny day\", internally it will be split into the individual tokens \"sunny\" and \"day\". This\n * enables full-text searches for these terms. This property must be false for simple\n * fields of other non-string data types.\n * Note: searchable fields consume extra space in your index since Azure Cognitive Search will store an\n * additional tokenized version of the field value for full-text searches.\n * Defaults to false for simple fields.\n */\n searchable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $filter queries. Filterable\n * differs from searchable in how strings are handled. Fields of type Edm.String or\n * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are\n * for exact matches only. For example, if you set such a field f to \"sunny day\", $filter=f eq\n * 'sunny' will find no matches, but $filter=f eq 'sunny day' will.\n * Default is false.\n */\n filterable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $orderby expressions. By\n * default Azure Cognitive Search sorts results by score, but in many experiences users will want\n * to sort by fields in the documents. A simple field can be sortable only if it is single-valued\n * (it has a single value in the scope of the parent document). Simple collection fields cannot\n * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also\n * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent\n * field, or an ancestor field, that's the complex collection. The default for sortable is false.\n */\n sortable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in facet queries. Typically\n * used in a presentation of search results that includes hit count by category (for example,\n * search for digital cameras and see hits by brand, by megapixels, by price, and so on).\n * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable.\n * Default is false for all other simple fields.\n */\n facetable?: boolean;\n /**\n * The name of the language analyzer to use for the field. This option can be used only with\n * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.\n * Once the analyzer is chosen, it cannot be changed for the field.\n * KnownAnalyzerNames is an enum containing known values.\n */\n analyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at search time for the field. This option can be used only with\n * searchable fields. It must be set together with indexAnalyzer and it cannot be set together\n * with the analyzer option. 
This analyzer can be updated on an existing field.\n * KnownAnalyzerNames is an enum containing known values.\n */\n searchAnalyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at indexing time for the field. This option can be used only\n * with searchable fields. It must be set together with searchAnalyzer and it cannot be set\n * together with the analyzer option. Once the analyzer is chosen, it cannot be changed for the\n * field. KnownAnalyzerNames is an enum containing known values.\n */\n indexAnalyzerName?: LexicalAnalyzerName;\n /**\n * A list of the names of synonym maps to associate with this field. This option can be used only\n * with searchable fields. Currently only one synonym map per field is supported. Assigning a\n * synonym map to a field ensures that query terms targeting that field are expanded at\n * query-time using the rules in the synonym map. This attribute can be changed on existing\n * fields.\n */\n synonymMapNames?: string[];\n}\n\nexport function isComplexField(field: SearchField): field is ComplexField {\n return field.type === \"Edm.ComplexType\" || field.type === \"Collection(Edm.ComplexType)\";\n}\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface ComplexField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field.\n * Possible values include: 'Edm.ComplexType','Collection(Edm.ComplexType)'\n */\n type: ComplexDataType;\n /**\n * A list of sub-fields.\n */\n fields: SearchField[];\n}\n\n/**\n * Represents a synonym map definition.\n */\nexport interface SynonymMap {\n /**\n * The name of the synonym map.\n */\n name: string;\n /**\n * An array of synonym rules in the specified synonym map format.\n */\n synonyms: string[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey | null;\n /**\n * The ETag of the synonym map.\n */\n etag?: string;\n}\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\n// eslint-disable-next-line @typescript-eslint/ban-types\nexport type IndexIterator = PagedAsyncIterableIterator<SearchIndex, SearchIndex[], {}>;\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. 
Use .byPage() to make one request to the server\n * per iteration.\n */\n// eslint-disable-next-line @typescript-eslint/ban-types\nexport type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;\n\n/**\n * Represents a search index definition, which describes the fields and search behavior of an\n * index.\n */\nexport interface SearchIndex {\n /**\n * The name of the index.\n */\n name: string;\n /**\n * The fields of the index.\n */\n fields: SearchField[];\n /**\n * The scoring profiles for the index.\n */\n scoringProfiles?: ScoringProfile[];\n /**\n * The name of the scoring profile to use if none is specified in the query. If this property is\n * not set and no scoring profile is specified in the query, then default scoring (tf-idf) will\n * be used.\n */\n defaultScoringProfile?: string;\n /**\n * Options to control Cross-Origin Resource Sharing (CORS) for the index.\n */\n corsOptions?: CorsOptions | null;\n /**\n * The suggesters for the index.\n */\n suggesters?: SearchSuggester[];\n /**\n * The analyzers for the index.\n */\n analyzers?: LexicalAnalyzer[];\n /**\n * The tokenizers for the index.\n */\n tokenizers?: LexicalTokenizer[];\n /**\n * The token filters for the index.\n */\n tokenFilters?: TokenFilter[];\n /**\n * The character filters for the index.\n */\n charFilters?: CharFilter[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey | null;\n /**\n * The type of similarity algorithm to be used when scoring and ranking the documents matching a\n * search query. The similarity algorithm can only be defined at index creation time and cannot\n * be modified on existing indexes. If null, the ClassicSimilarity algorithm is used.\n */\n similarity?: SimilarityAlgorithm;\n /**\n * The ETag of the index.\n */\n etag?: string;\n}\n\n/**\n * Represents an indexer.\n */\nexport interface SearchIndexer {\n /**\n * The name of the indexer.\n */\n name: string;\n /**\n * The description of the indexer.\n */\n description?: string;\n /**\n * The name of the datasource from which this indexer reads data.\n */\n dataSourceName: string;\n /**\n * The name of the skillset executing with this indexer.\n */\n skillsetName?: string;\n /**\n * The name of the index to which this indexer writes data.\n */\n targetIndexName: string;\n /**\n * The schedule for this indexer.\n */\n schedule?: IndexingSchedule | null;\n /**\n * Parameters for indexer execution.\n */\n parameters?: IndexingParameters | null;\n /**\n * Defines mappings between fields in the data source and corresponding target fields in the\n * index.\n */\n fieldMappings?: FieldMapping[];\n /**\n * Output field mappings are applied after enrichment and immediately before indexing.\n */\n outputFieldMappings?: FieldMapping[];\n /**\n * A value indicating whether the indexer is disabled. Default is false. 
Default value: false.\n */\n isDisabled?: boolean | null;\n /**\n * The ETag of the indexer.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your indexer definition (as well as\n * indexer execution status) when you want full assurance that no one, not even Microsoft, can\n * decrypt them in Azure Cognitive Search. Once you have encrypted your indexer definition, it\n * will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property\n * to null. You can change this property as needed if you want to rotate your encryption key;\n * Your indexer definition (and indexer execution status) will be unaffected. Encryption with\n * customer-managed keys is not available for free search services, and is only available for\n * paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey | null;\n}\n\n/**\n * A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be\n * used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym\n * maps.\n */\nexport interface SearchResourceEncryptionKey {\n /**\n * The name of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyName: string;\n /**\n * The version of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyVersion: string;\n /**\n * The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be\n * used to encrypt your data at rest. An example URI might be\n * https://my-keyvault-name.vault.azure.net.\n */\n vaultUrl: string;\n /**\n * An AAD Application ID that was granted the required access permissions to the Azure Key Vault\n * that is to be used when encrypting your data at rest. The Application ID should not be\n * confused with the Object ID for your AAD Application.\n */\n applicationId?: string;\n /**\n * The authentication key of the specified AAD application.\n */\n applicationSecret?: string;\n}\n\n/**\n * A list of skills.\n */\nexport interface SearchIndexerSkillset {\n /**\n * The name of the skillset.\n */\n name: string;\n /**\n * The description of the skillset.\n */\n description?: string;\n /**\n * A list of skills in the skillset.\n */\n skills: SearchIndexerSkill[];\n /**\n * Details about cognitive services to be used when running skills.\n */\n cognitiveServicesAccount?: CognitiveServicesAccount;\n /**\n * Definition of additional projections to azure blob, table, or files, of enriched data.\n */\n knowledgeStore?: SearchIndexerKnowledgeStore;\n /**\n * The ETag of the skillset.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your skillset definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure\n * Cognitive Search. Once you have encrypted your skillset definition, it will always remain\n * encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can\n * change this property as needed if you want to rotate your encryption key; Your skillset\n * definition will be unaffected. 
Encryption with customer-managed keys is not available for free\n * search services, and is only available for paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey | null;\n}\n\n/**\n * Defines parameters for a search index that influence scoring in search queries.\n */\nexport interface ScoringProfile {\n /**\n * The name of the scoring profile.\n */\n name: string;\n /**\n * Parameters that boost scoring based on text matches in certain index fields.\n */\n textWeights?: TextWeights;\n /**\n * The collection of functions that influence the scoring of documents.\n */\n functions?: ScoringFunction[];\n /**\n * A value indicating how the results of individual scoring functions should be combined.\n * Defaults to \"Sum\". Ignored if there are no scoring functions. Possible values include: 'sum',\n * 'average', 'minimum', 'maximum', 'firstMatching'\n */\n functionAggregation?: ScoringFunctionAggregation;\n}\n\n/**\n * Defines values for TokenizerName.\n * @readonly\n */\nexport enum KnownTokenizerNames {\n /**\n * Grammar-based tokenizer that is suitable for processing most European-language documents. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html\n */\n Classic = \"classic\",\n /**\n * Tokenizes the input from an edge into n-grams of the given size(s). See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html\n */\n EdgeNGram = \"edgeNGram\",\n /**\n * Emits the entire input as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html\n */\n Keyword = \"keyword_v2\",\n /**\n * Divides text at non-letters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html\n */\n Letter = \"letter\",\n /**\n * Divides text at non-letters and converts them to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html\n */\n Lowercase = \"lowercase\",\n /**\n * Divides text using language-specific rules.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageTokenizer = \"microsoft_language_tokenizer\",\n /**\n * Divides text using language-specific rules and reduces words to their base forms.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageStemmingTokenizer = \"microsoft_language_stemming_tokenizer\",\n /**\n * Tokenizes the input into n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html\n */\n NGram = \"nGram\",\n /**\n * Tokenizer for path-like hierarchies. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html\n */\n PathHierarchy = \"path_hierarchy_v2\",\n /**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html\n */\n Pattern = \"pattern\",\n /**\n * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop\n * filter. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html\n */\n Standard = \"standard_v2\",\n /**\n * Tokenizes urls and emails as one token. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html\n */\n UaxUrlEmail = \"uax_url_email\",\n /**\n * Divides text at whitespace. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html\n */\n Whitespace = \"whitespace\"\n}\n\n/**\n * Defines values for TokenFilterName.\n * @readonly\n */\nexport enum KnownTokenFilterNames {\n /**\n * A token filter that applies the Arabic normalizer to normalize the orthography. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html\n */\n ArabicNormalization = \"arabic_normalization\",\n /**\n * Strips all characters after an apostrophe (including the apostrophe itself). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html\n */\n Apostrophe = \"apostrophe\",\n /**\n * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127\n * ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such\n * equivalents exist. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n AsciiFolding = \"asciifolding\",\n /**\n * Forms bigrams of CJK terms that are generated from StandardTokenizer. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html\n */\n CjkBigram = \"cjk_bigram\",\n /**\n * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic\n * Latin, and half-width Katakana variants into the equivalent Kana. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html\n */\n CjkWidth = \"cjk_width\",\n /**\n * Removes English possessives, and dots from acronyms. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html\n */\n Classic = \"classic\",\n /**\n * Construct bigrams for frequently occurring terms while indexing. Single terms are still\n * indexed too, with bigrams overlaid. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html\n */\n CommonGram = \"common_grams\",\n /**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html\n */\n EdgeNGram = \"edgeNGram_v2\",\n /**\n * Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html\n */\n Elision = \"elision\",\n /**\n * Normalizes German characters according to the heuristics of the German2 snowball algorithm.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html\n */\n GermanNormalization = \"german_normalization\",\n /**\n * Normalizes text in Hindi to remove some differences in spelling variations. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html\n */\n HindiNormalization = \"hindi_normalization\",\n /**\n * Normalizes the Unicode representation of text in Indian languages. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html\n */\n IndicNormalization = \"indic_normalization\",\n /**\n * Emits each incoming token twice, once as keyword and once as non-keyword. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html\n */\n KeywordRepeat = \"keyword_repeat\",\n /**\n * A high-performance kstem filter for English. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html\n */\n KStem = \"kstem\",\n /**\n * Removes words that are too long or too short. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html\n */\n Length = \"length\",\n /**\n * Limits the number of tokens while indexing. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html\n */\n Limit = \"limit\",\n /**\n * Normalizes token text to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm\n */\n Lowercase = \"lowercase\",\n /**\n * Generates n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html\n */\n NGram = \"nGram_v2\",\n /**\n * Applies normalization for Persian. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html\n */\n PersianNormalization = \"persian_normalization\",\n /**\n * Create tokens for phonetic matches. See\n * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html\n */\n Phonetic = \"phonetic\",\n /**\n * Uses the Porter stemming algorithm to transform the token stream. See\n * http://tartarus.org/~martin/PorterStemmer\n */\n PorterStem = \"porter_stem\",\n /**\n * Reverses the token string. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html\n */\n Reverse = \"reverse\",\n /**\n * Normalizes use of the interchangeable Scandinavian characters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html\n */\n ScandinavianNormalization = \"scandinavian_normalization\",\n /**\n * Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use\n * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html\n */\n ScandinavianFoldingNormalization = \"scandinavian_folding\",\n /**\n * Creates combinations of tokens as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html\n */\n Shingle = \"shingle\",\n /**\n * A filter that stems words using a Snowball-generated stemmer. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html\n */\n Snowball = \"snowball\",\n /**\n * Normalizes the Unicode representation of Sorani text. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html\n */\n SoraniNormalization = \"sorani_normalization\",\n /**\n * Language specific stemming filter. See\n * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters\n */\n Stemmer = \"stemmer\",\n /**\n * Removes stop words from a token stream. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html\n */\n Stopwords = \"stopwords\",\n /**\n * Trims leading and trailing whitespace from tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html\n */\n Trim = \"trim\",\n /**\n * Truncates the terms to a specific length. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html\n */\n Truncate = \"truncate\",\n /**\n * Filters out tokens with same text as the previous token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html\n */\n Unique = \"unique\",\n /**\n * Normalizes token text to upper case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html\n */\n Uppercase = \"uppercase\",\n /**\n * Splits words into subwords and performs optional transformations on subword groups.\n */\n WordDelimiter = \"word_delimiter\"\n}\n\n/**\n * Defines values for CharFilterName.\n * @readonly\n */\nexport enum KnownCharFilterNames {\n /**\n * A character filter that attempts to strip out HTML constructs. See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html\n */\n HtmlStrip = \"html_strip\"\n}\n\n/**\n * Defines values for AnalyzerName.\n * See https://docs.microsoft.com/rest/api/searchservice/Language-support\n * @readonly\n */\nexport enum KnownAnalyzerNames {\n /**\n * Arabic\n */\n ArMicrosoft = \"ar.microsoft\",\n /**\n * Arabic\n */\n ArLucene = \"ar.lucene\",\n /**\n * Armenian\n */\n HyLucene = \"hy.lucene\",\n /**\n * Bangla\n */\n BnMicrosoft = \"bn.microsoft\",\n /**\n * Basque\n */\n EuLucene = \"eu.lucene\",\n /**\n * Bulgarian\n */\n BgMicrosoft = \"bg.microsoft\",\n /**\n * Bulgarian\n */\n BgLucene = \"bg.lucene\",\n /**\n * Catalan\n */\n CaMicrosoft = \"ca.microsoft\",\n /**\n * Catalan\n */\n CaLucene = \"ca.lucene\",\n /**\n * Chinese Simplified\n */\n ZhHansMicrosoft = \"zh-Hans.microsoft\",\n /**\n * Chinese Simplified\n */\n ZhHansLucene = \"zh-Hans.lucene\",\n /**\n * Chinese Traditional\n */\n ZhHantMicrosoft = \"zh-Hant.microsoft\",\n /**\n * Chinese Traditional\n */\n ZhHantLucene = \"zh-Hant.lucene\",\n /**\n * Croatian\n */\n HrMicrosoft = \"hr.microsoft\",\n /**\n * Czech\n */\n CsMicrosoft = \"cs.microsoft\",\n /**\n * Czech\n */\n CsLucene = \"cs.lucene\",\n /**\n * Danish\n */\n DaMicrosoft = \"da.microsoft\",\n /**\n * Danish\n */\n DaLucene = \"da.lucene\",\n /**\n * Dutch\n */\n NlMicrosoft = \"nl.microsoft\",\n /**\n * Dutch\n */\n NlLucene = \"nl.lucene\",\n /**\n * English\n */\n EnMicrosoft = \"en.microsoft\",\n /**\n * English\n */\n EnLucene = \"en.lucene\",\n /**\n * Estonian\n */\n EtMicrosoft = \"et.microsoft\",\n /**\n * Finnish\n */\n FiMicrosoft = \"fi.microsoft\",\n /**\n * Finnish\n */\n FiLucene = \"fi.lucene\",\n /**\n * French\n */\n FrMicrosoft = \"fr.microsoft\",\n /**\n * French\n */\n 
FrLucene = \"fr.lucene\",\n /**\n * Galician\n */\n GlLucene = \"gl.lucene\",\n /**\n * German\n */\n DeMicrosoft = \"de.microsoft\",\n /**\n * German\n */\n DeLucene = \"de.lucene\",\n /**\n * Greek\n */\n ElMicrosoft = \"el.microsoft\",\n /**\n * Greek\n */\n ElLucene = \"el.lucene\",\n /**\n * Gujarati\n */\n GuMicrosoft = \"gu.microsoft\",\n /**\n * Hebrew\n */\n HeMicrosoft = \"he.microsoft\",\n /**\n * Hindi\n */\n HiMicrosoft = \"hi.microsoft\",\n /**\n * Hindi\n */\n HiLucene = \"hi.lucene\",\n /**\n * Hungarian\n */\n HuMicrosoft = \"hu.microsoft\",\n /**\n * Hungarian\n */\n HuLucene = \"hu.lucene\",\n /**\n * Icelandic\n */\n IsMicrosoft = \"is.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdMicrosoft = \"id.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdLucene = \"id.lucene\",\n /**\n * Irish\n */\n GaLucene = \"ga.lucene\",\n /**\n * Italian\n */\n ItMicrosoft = \"it.microsoft\",\n /**\n * Italian\n */\n ItLucene = \"it.lucene\",\n /**\n * Japanese\n */\n JaMicrosoft = \"ja.microsoft\",\n /**\n * Japanese\n */\n JaLucene = \"ja.lucene\",\n /**\n * Kannada\n */\n KnMicrosoft = \"kn.microsoft\",\n /**\n * Korean\n */\n KoMicrosoft = \"ko.microsoft\",\n /**\n * Korean\n */\n KoLucene = \"ko.lucene\",\n /**\n * Latvian\n */\n LvMicrosoft = \"lv.microsoft\",\n /**\n * Latvian\n */\n LvLucene = \"lv.lucene\",\n /**\n * Lithuanian\n */\n LtMicrosoft = \"lt.microsoft\",\n /**\n * Malayalam\n */\n MlMicrosoft = \"ml.microsoft\",\n /**\n * Malay (Latin)\n */\n MsMicrosoft = \"ms.microsoft\",\n /**\n * Marathi\n */\n MrMicrosoft = \"mr.microsoft\",\n /**\n * Norwegian\n */\n NbMicrosoft = \"nb.microsoft\",\n /**\n * Norwegian\n */\n NoLucene = \"no.lucene\",\n /**\n * Persian\n */\n FaLucene = \"fa.lucene\",\n /**\n * Polish\n */\n PlMicrosoft = \"pl.microsoft\",\n /**\n * Polish\n */\n PlLucene = \"pl.lucene\",\n /**\n * Portuguese (Brazil)\n */\n PtBRMicrosoft = \"pt-BR.microsoft\",\n /**\n * Portuguese (Brazil)\n */\n PtBRLucene = \"pt-BR.lucene\",\n /**\n * Portuguese (Portugal)\n */\n PtPTMicrosoft = \"pt-PT.microsoft\",\n /**\n * Portuguese (Portugal)\n */\n PtPTLucene = \"pt-PT.lucene\",\n /**\n * Punjabi\n */ PaMicrosoft = \"pa.microsoft\",\n /**\n * Romanian\n */\n RoMicrosoft = \"ro.microsoft\",\n /**\n * Romanian\n */\n RoLucene = \"ro.lucene\",\n /**\n * Russian\n */\n RuMicrosoft = \"ru.microsoft\",\n /**\n * Russian\n */\n RuLucene = \"ru.lucene\",\n /**\n * Serbian (Cyrillic)\n */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /**\n * Serbian (Latin)\n */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /**\n * Slovak\n */\n SkMicrosoft = \"sk.microsoft\",\n /**\n * Slovenian\n */\n SlMicrosoft = \"sl.microsoft\",\n /**\n * Spanish\n */\n EsMicrosoft = \"es.microsoft\",\n /**\n * Spanish\n */\n EsLucene = \"es.lucene\",\n /**\n * Swedish\n */\n SvMicrosoft = \"sv.microsoft\",\n /**\n * Swedish\n */\n SvLucene = \"sv.lucene\",\n /**\n * Tamil\n */\n TaMicrosoft = \"ta.microsoft\",\n /**\n * Telugu\n */\n TeMicrosoft = \"te.microsoft\",\n /**\n * Thai\n */\n ThMicrosoft = \"th.microsoft\",\n /**\n * Thai\n */\n ThLucene = \"th.lucene\",\n /**\n * Turkish\n */\n TrMicrosoft = \"tr.microsoft\",\n /**\n * Turkish\n */\n TrLucene = \"tr.lucene\",\n /**\n * Ukrainian\n */\n UkMicrosoft = \"uk.microsoft\",\n /**\n * Urdu\n */\n UrMicrosoft = \"ur.microsoft\",\n /**\n * Vietnamese\n */\n ViMicrosoft = \"vi.microsoft\",\n /**\n * See: https://lucene.apache.org/core/6_6_1/core/org/apache/lucene/analysis/standard/StandardAnalyzer.html\n */\n StandardLucene = 
\"standard.lucene\",\n /**\n * See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /**\n * Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names.\n */\n Keyword = \"keyword\",\n /**\n * Flexibly separates text into terms via a regular expression pattern.\n */\n Pattern = \"pattern\",\n /**\n * Divides text at non-letters and converts them to lower case.\n */\n Simple = \"simple\",\n /**\n * Divides text at non-letters; Applies the lowercase and stopword token filters.\n */\n Stop = \"stop\",\n /**\n * An analyzer that uses the whitespace tokenizer.\n */\n Whitespace = \"whitespace\"\n}\n\n/**\n * Contains the possible cases for DataChangeDetectionPolicy.\n */\nexport type DataChangeDetectionPolicy =\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\n\n/**\n * Contains the possible cases for DataDeletionDetectionPolicy.\n */\nexport type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;\n\n/**\n * Represents a datasource definition, which can be used to configure an indexer.\n */\nexport interface SearchIndexerDataSourceConnection {\n /**\n * The name of the datasource.\n */\n name: string;\n /**\n * The description of the datasource.\n */\n description?: string;\n /**\n * The type of the datasource. Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob',\n * 'AzureTable', 'MySql', 'AdlsGen2'\n */\n type: SearchIndexerDataSourceType;\n /**\n * The connection string for the datasource.\n */\n connectionString?: string;\n /**\n * The data container for the datasource.\n */\n container: SearchIndexerDataContainer;\n /**\n * The data change detection policy for the datasource.\n */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicy | null;\n /**\n * The data deletion detection policy for the datasource.\n */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy | null;\n /**\n * The ETag of the DataSource.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your datasource definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your data source definition in\n * Azure Cognitive Search. Once you have encrypted your data source definition, it will always\n * remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null.\n * You can change this property as needed if you want to rotate your encryption key; Your\n * datasource definition will be unaffected. Encryption with customer-managed keys is not\n * available for free search services, and is only available for paid services created on or\n * after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey | null;\n}\n// END manually modified generated interfaces\n"]}
@@ -1,8 +1,7 @@
  // Copyright (c) Microsoft Corporation.
  // Licensed under the MIT license.
  import { __rest } from "tslib";
- import { isComplexField, } from "./serviceModels";
- export const DEFAULT_SEARCH_SCOPE = "https://search.azure.com/.default";
+ import { isComplexField } from "./serviceModels";
  export function convertSkillsToPublic(skills) {
  if (!skills) {
  return skills;
@@ -40,18 +39,6 @@ export function convertSkillsToPublic(skills) {
  case "#Microsoft.Skills.Text.SplitSkill":
  result.push(skill);
  break;
- case "#Microsoft.Skills.Text.PIIDetectionSkill":
- result.push(skill);
- break;
- case "#Microsoft.Skills.Text.V3.EntityRecognitionSkill":
- result.push(skill);
- break;
- case "#Microsoft.Skills.Text.V3.EntityLinkingSkill":
- result.push(skill);
- break;
- case "#Microsoft.Skills.Text.V3.SentimentSkill":
- result.push(skill);
- break;
  case "#Microsoft.Skills.Text.TranslationSkill":
  result.push(skill);
  break;
@@ -64,9 +51,6 @@ export function convertSkillsToPublic(skills) {
  case "#Microsoft.Skills.Util.DocumentExtractionSkill":
  result.push(skill);
  break;
- case "#Microsoft.Skills.Custom.AmlSkill":
- result.push(skill);
- break;
  }
  }
  return result;
@@ -154,7 +138,7 @@ export function convertFieldsToPublic(fields) {
  return {
  name: field.name,
  type: field.type,
- fields: convertFieldsToPublic(field.fields),
+ fields: convertFieldsToPublic(field.fields)
  };
  }
  else {
@@ -162,15 +146,13 @@ export function convertFieldsToPublic(fields) {
  const searchAnalyzerName = field.searchAnalyzer;
  const indexAnalyzerName = field.indexAnalyzer;
  const synonymMapNames = field.synonymMaps;
- const normalizerName = field.normalizer;
  const { retrievable } = field, restField = __rest(field, ["retrievable"]);
  const hidden = typeof retrievable === "boolean" ? !retrievable : retrievable;
  result = Object.assign(Object.assign({}, restField), { hidden,
  analyzerName,
  searchAnalyzerName,
  indexAnalyzerName,
- synonymMapNames,
- normalizerName });
+ synonymMapNames });
  }
  return result;
  });
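In the `convertFieldsToPublic` hunk above, the generated `retrievable` flag is inverted into the public `hidden` flag, and the preview-only `normalizer` mapping is dropped. A minimal self-contained sketch of that inversion (the field shapes are hypothetical, trimmed to the relevant properties):

```ts
// Sketch of the retrievable -> hidden inversion shown above.
interface GeneratedFieldLike { name: string; type: string; retrievable?: boolean }
interface PublicFieldLike { name: string; type: string; hidden?: boolean }

function toPublicField(field: GeneratedFieldLike): PublicFieldLike {
  const { retrievable, ...rest } = field;
  // Mirrors: typeof retrievable === "boolean" ? !retrievable : retrievable
  const hidden = typeof retrievable === "boolean" ? !retrievable : retrievable;
  return { ...rest, hidden };
}

// toPublicField({ name: "id", type: "Edm.String", retrievable: false })
//   -> { name: "id", type: "Edm.String", hidden: true }
```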
@@ -182,7 +164,7 @@ export function convertFieldsToGenerated(fields) {
  return {
  name: field.name,
  type: field.type,
- fields: convertFieldsToGenerated(field.fields),
+ fields: convertFieldsToGenerated(field.fields)
  };
  }
  else {
@@ -190,7 +172,7 @@ export function convertFieldsToGenerated(fields) {
  const retrievable = typeof hidden === "boolean" ? !hidden : hidden;
  return Object.assign(Object.assign({}, restField), { retrievable,
  // modify API defaults to use less storage for simple types
- searchable: (_a = field.searchable) !== null && _a !== void 0 ? _a : false, filterable: (_b = field.filterable) !== null && _b !== void 0 ? _b : false, facetable: (_c = field.facetable) !== null && _c !== void 0 ? _c : false, sortable: (_d = field.sortable) !== null && _d !== void 0 ? _d : false, analyzer: field.analyzerName, searchAnalyzer: field.searchAnalyzerName, indexAnalyzer: field.indexAnalyzerName, synonymMaps: field.synonymMapNames, normalizer: field.normalizerName });
+ searchable: (_a = field.searchable) !== null && _a !== void 0 ? _a : false, filterable: (_b = field.filterable) !== null && _b !== void 0 ? _b : false, facetable: (_c = field.facetable) !== null && _c !== void 0 ? _c : false, sortable: (_d = field.sortable) !== null && _d !== void 0 ? _d : false, analyzer: field.analyzerName, searchAnalyzer: field.searchAnalyzerName, indexAnalyzer: field.indexAnalyzerName, synonymMaps: field.synonymMapNames });
  }
  });
  }
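The `convertFieldsToGenerated` hunk keeps the comment about modifying API defaults: unset `searchable`/`filterable`/`facetable`/`sortable` flags are sent as `false` instead of being left to the service defaults. The transpiled `(_a = field.searchable) !== null && _a !== void 0 ? _a : false` chains are downleveled nullish coalescing; roughly equivalent to this sketch (the field shape is assumed):

```ts
// ?? form of the transpiled defaulting shown in the hunk above.
function applyStorageFriendlyDefaults(field: {
  searchable?: boolean;
  filterable?: boolean;
  facetable?: boolean;
  sortable?: boolean;
}) {
  return {
    searchable: field.searchable ?? false,
    filterable: field.filterable ?? false,
    facetable: field.facetable ?? false,
    sortable: field.sortable ?? false
  };
}
```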
@@ -243,6 +225,17 @@ export function convertSimilarityToPublic(similarity) {
  return similarity;
  }
  }
+ export function extractOperationOptions(obj) {
+ const { abortSignal, requestOptions, tracingOptions } = obj, restOptions = __rest(obj, ["abortSignal", "requestOptions", "tracingOptions"]);
+ return {
+ operationOptions: {
+ abortSignal,
+ requestOptions,
+ tracingOptions
+ },
+ restOptions
+ };
+ }
  export function convertEncryptionKeyToPublic(encryptionKey) {
  if (!encryptionKey) {
  return encryptionKey;
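The added `extractOperationOptions` helper splits a caller-supplied options bag into the common operation options (`abortSignal`, `requestOptions`, `tracingOptions`) and whatever method-specific options remain. A usage sketch — the helper is internal to `serviceUtils.js`, so the import path and the `includeTotalCount` option here are illustrative only:

```ts
import { extractOperationOptions } from "./serviceUtils"; // internal module, path illustrative

const { operationOptions, restOptions } = extractOperationOptions({
  abortSignal: undefined,   // would normally come from an AbortController
  tracingOptions: {},
  includeTotalCount: true   // hypothetical method-specific option
});
// operationOptions -> { abortSignal, requestOptions, tracingOptions }
// restOptions      -> { includeTotalCount: true }
```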
@@ -250,8 +243,7 @@ export function convertEncryptionKeyToPublic(encryptionKey) {
  const result = {
  keyName: encryptionKey.keyName,
  keyVersion: encryptionKey.keyVersion,
- vaultUrl: encryptionKey.vaultUri,
- identity: convertSearchIndexerDataIdentityToPublic(encryptionKey.identity),
+ vaultUrl: encryptionKey.vaultUri
  };
  if (encryptionKey.accessCredentials) {
  result.applicationId = encryptionKey.accessCredentials.applicationId;
@@ -266,13 +258,12 @@ export function convertEncryptionKeyToGenerated(encryptionKey) {
  const result = {
  keyName: encryptionKey.keyName,
  keyVersion: encryptionKey.keyVersion,
- vaultUri: encryptionKey.vaultUrl,
- identity: encryptionKey.identity,
+ vaultUri: encryptionKey.vaultUrl
  };
  if (encryptionKey.applicationId) {
  result.accessCredentials = {
  applicationId: encryptionKey.applicationId,
- applicationSecret: encryptionKey.applicationSecret,
+ applicationSecret: encryptionKey.applicationSecret
  };
  }
  return result;
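Both encryption-key converters above are plain property renames between the public shape (`vaultUrl`, flattened `applicationId`/`applicationSecret`) and the generated wire shape (`vaultUri`, nested `accessCredentials`); the preview-only `identity` property is no longer mapped. For example (all values are placeholders):

```ts
// Public model shape accepted by convertEncryptionKeyToGenerated.
const publicKey = {
  keyName: "my-key",
  keyVersion: "1",
  vaultUrl: "https://myvault.vault.azure.net",
  applicationId: "<aad-application-id>",
  applicationSecret: "<aad-application-secret>"
};

// Generated shape produced by the converter above.
const generatedKey = {
  keyName: "my-key",
  keyVersion: "1",
  vaultUri: "https://myvault.vault.azure.net",
  accessCredentials: {
    applicationId: "<aad-application-id>",
    applicationSecret: "<aad-application-secret>"
  }
};
```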
@@ -289,11 +280,9 @@ export function generatedIndexToPublicIndex(generatedIndex) {
  tokenizers: convertTokenizersToPublic(generatedIndex.tokenizers),
  tokenFilters: generatedIndex.tokenFilters,
  charFilters: generatedIndex.charFilters,
- normalizers: generatedIndex.normalizers,
  scoringProfiles: generatedIndex.scoringProfiles,
  fields: convertFieldsToPublic(generatedIndex.fields),
- similarity: convertSimilarityToPublic(generatedIndex.similarity),
- semanticSettings: generatedIndex.semanticSettings,
+ similarity: convertSimilarityToPublic(generatedIndex.similarity)
  };
  }
  export function generatedSearchResultToPublicSearchResult(results) {
@@ -303,9 +292,7 @@ export function generatedSearchResultToPublicSearchResult(results) {
  const obj = {
  score: _score,
  highlights: _highlights,
- rerankerScore,
- captions,
- document: doc,
+ document: doc
  };
  return obj;
  });
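In `generatedSearchResultToPublicSearchResult`, the generated `_score`/`_highlights` properties become the public `score`/`highlights`, the remaining properties are folded into `document`, and the preview-only `rerankerScore`/`captions` fields are no longer surfaced. A sketch with a made-up raw result (the destructuring mirrors how the surrounding function builds `doc`):

```ts
// Hypothetical raw result from the generated client.
const raw = { _score: 1.23, _highlights: { title: ["<em>hotel</em>"] }, hotelId: "1", title: "Fancy Hotel" };

const { _score, _highlights, ...rest } = raw;
const publicResult = { score: _score, highlights: _highlights, document: rest };
// -> { score: 1.23, highlights: {...}, document: { hotelId: "1", title: "Fancy Hotel" } }
```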
@@ -317,13 +304,13 @@ export function generatedSuggestDocumentsResultToPublicSuggestDocumentsResult(se
  const doc = Object.assign({}, restProps);
  const obj = {
  text: _text,
- document: doc,
+ document: doc
  };
  return obj;
  });
  const result = {
  results: results,
- coverage: searchDocumentsResult.coverage,
+ coverage: searchDocumentsResult.coverage
  };
  return result;
  }
@@ -337,13 +324,11 @@ export function publicIndexToGeneratedIndex(index) {
  etag: index.etag,
  tokenFilters: convertTokenFiltersToGenerated(index.tokenFilters),
  charFilters: index.charFilters,
- normalizers: index.normalizers,
  scoringProfiles: index.scoringProfiles,
  analyzers: convertAnalyzersToGenerated(index.analyzers),
  tokenizers: convertTokenizersToGenerated(index.tokenizers),
  fields: convertFieldsToGenerated(index.fields),
- similarity: convertSimilarityToGenerated(index.similarity),
- semanticSettings: index.semanticSettings,
+ similarity: convertSimilarityToGenerated(index.similarity)
  };
  }
  export function generatedSkillsetToPublicSkillset(generatedSkillset) {
@@ -354,7 +339,7 @@ export function generatedSkillsetToPublicSkillset(generatedSkillset) {
  cognitiveServicesAccount: convertCognitiveServicesAccountToPublic(generatedSkillset.cognitiveServicesAccount),
  knowledgeStore: generatedSkillset.knowledgeStore,
  etag: generatedSkillset.etag,
- encryptionKey: convertEncryptionKeyToPublic(generatedSkillset.encryptionKey),
+ encryptionKey: convertEncryptionKeyToPublic(generatedSkillset.encryptionKey)
  };
  }
  export function publicSkillsetToGeneratedSkillset(skillset) {
@@ -365,7 +350,7 @@ export function publicSkillsetToGeneratedSkillset(skillset) {
  skills: skillset.skills,
  cognitiveServicesAccount: convertCognitiveServicesAccountToGenerated(skillset.cognitiveServicesAccount),
  knowledgeStore: skillset.knowledgeStore,
- encryptionKey: convertEncryptionKeyToGenerated(skillset.encryptionKey),
+ encryptionKey: convertEncryptionKeyToGenerated(skillset.encryptionKey)
  };
  }
  export function generatedSynonymMapToPublicSynonymMap(synonymMap) {
@@ -373,7 +358,7 @@ export function generatedSynonymMapToPublicSynonymMap(synonymMap) {
  name: synonymMap.name,
  encryptionKey: convertEncryptionKeyToPublic(synonymMap.encryptionKey),
  etag: synonymMap.etag,
- synonyms: [],
+ synonyms: []
  };
  if (synonymMap.synonyms) {
  result.synonyms = synonymMap.synonyms.split("\n");
@@ -386,7 +371,7 @@ export function publicSynonymMapToGeneratedSynonymMap(synonymMap) {
  format: "solr",
  encryptionKey: convertEncryptionKeyToGenerated(synonymMap.encryptionKey),
  etag: synonymMap.etag,
- synonyms: synonymMap.synonyms.join("\n"),
+ synonyms: synonymMap.synonyms.join("\n")
  };
  result.encryptionKey = convertEncryptionKeyToGenerated(synonymMap.encryptionKey);
  return result;
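The two synonym-map converters store synonyms as a single newline-delimited string in `solr` format on the wire and as a string array in the public model, as the `join("\n")`/`split("\n")` calls above show. Round trip, roughly (example synonym rules are hypothetical):

```ts
// Public model -> wire format (publicSynonymMapToGeneratedSynonymMap)
const synonyms = ["USA, United States, United States of America", "Washington, Wash. => WA"];
const wireValue = synonyms.join("\n");

// Wire format -> public model (generatedSynonymMapToPublicSynonymMap)
const roundTripped = wireValue.split("\n");
// roundTripped deep-equals synonyms
```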
@@ -403,14 +388,13 @@ export function publicDataSourceToGeneratedDataSource(dataSource) {
  description: dataSource.description,
  type: dataSource.type,
  credentials: {
- connectionString: dataSource.connectionString,
+ connectionString: dataSource.connectionString
  },
  container: dataSource.container,
- identity: dataSource.identity,
  etag: dataSource.etag,
  dataChangeDetectionPolicy: dataSource.dataChangeDetectionPolicy,
  dataDeletionDetectionPolicy: dataSource.dataDeletionDetectionPolicy,
- encryptionKey: convertEncryptionKeyToGenerated(dataSource.encryptionKey),
+ encryptionKey: convertEncryptionKeyToGenerated(dataSource.encryptionKey)
  };
  }
416
400
  export function generatedDataSourceToPublicDataSource(dataSource) {
@@ -420,24 +404,12 @@ export function generatedDataSourceToPublicDataSource(dataSource) {
420
404
  type: dataSource.type,
421
405
  connectionString: dataSource.credentials.connectionString,
422
406
  container: dataSource.container,
423
- identity: convertSearchIndexerDataIdentityToPublic(dataSource.identity),
424
407
  etag: dataSource.etag,
425
408
  dataChangeDetectionPolicy: convertDataChangeDetectionPolicyToPublic(dataSource.dataChangeDetectionPolicy),
426
409
  dataDeletionDetectionPolicy: convertDataDeletionDetectionPolicyToPublic(dataSource.dataDeletionDetectionPolicy),
427
- encryptionKey: convertEncryptionKeyToPublic(dataSource.encryptionKey),
410
+ encryptionKey: convertEncryptionKeyToPublic(dataSource.encryptionKey)
428
411
  };
429
412
  }
430
- export function convertSearchIndexerDataIdentityToPublic(searchIndexerDataIdentity) {
431
- if (!searchIndexerDataIdentity) {
432
- return searchIndexerDataIdentity;
433
- }
434
- if (searchIndexerDataIdentity.odatatype === "#Microsoft.Azure.Search.SearchIndexerDataNoneIdentity") {
435
- return searchIndexerDataIdentity;
436
- }
437
- else {
438
- return searchIndexerDataIdentity;
439
- }
440
- }
441
413
  export function convertDataChangeDetectionPolicyToPublic(dataChangeDetectionPolicy) {
442
414
  if (!dataChangeDetectionPolicy) {
443
415
  return dataChangeDetectionPolicy;
@@ -466,14 +438,4 @@ export function getRandomIntegerInclusive(min, max) {
  const offset = Math.floor(Math.random() * (max - min + 1));
  return offset + min;
  }
- /**
- * A wrapper for setTimeout that resolves a promise after timeInMs milliseconds.
- * @param timeInMs - The number of milliseconds to be delayed.
- * @returns Promise that is resolved after timeInMs
- */
- export function delay(timeInMs) {
- return new Promise((resolve) => setTimeout(() => resolve(), timeInMs));
- }
- export const serviceVersions = ["2020-06-30", "2021-04-30-Preview"];
- export const defaultServiceVersion = "2021-04-30-Preview";
  //# sourceMappingURL=serviceUtils.js.map