@azure/search-documents 12.2.0-beta.3 → 12.2.0
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- package/dist/browser/errorModels.d.ts +17 -23
- package/dist/browser/errorModels.d.ts.map +1 -1
- package/dist/browser/errorModels.js.map +1 -1
- package/dist/browser/generated/data/models/index.d.ts +20 -567
- package/dist/browser/generated/data/models/index.d.ts.map +1 -1
- package/dist/browser/generated/data/models/index.js +6 -216
- package/dist/browser/generated/data/models/index.js.map +1 -1
- package/dist/browser/generated/data/models/mappers.d.ts +0 -18
- package/dist/browser/generated/data/models/mappers.d.ts.map +1 -1
- package/dist/browser/generated/data/models/mappers.js +4 -435
- package/dist/browser/generated/data/models/mappers.js.map +1 -1
- package/dist/browser/generated/data/models/parameters.d.ts +0 -5
- package/dist/browser/generated/data/models/parameters.d.ts.map +1 -1
- package/dist/browser/generated/data/models/parameters.js +0 -51
- package/dist/browser/generated/data/models/parameters.js.map +1 -1
- package/dist/browser/generated/data/operations/documents.js +3 -11
- package/dist/browser/generated/data/operations/documents.js.map +1 -1
- package/dist/browser/generated/data/searchClient.d.ts +3 -3
- package/dist/browser/generated/data/searchClient.d.ts.map +1 -1
- package/dist/browser/generated/data/searchClient.js +1 -1
- package/dist/browser/generated/data/searchClient.js.map +1 -1
- package/dist/browser/generated/service/models/index.d.ts +130 -1023
- package/dist/browser/generated/service/models/index.d.ts.map +1 -1
- package/dist/browser/generated/service/models/index.js +24 -196
- package/dist/browser/generated/service/models/index.js.map +1 -1
- package/dist/browser/generated/service/models/mappers.d.ts +17 -67
- package/dist/browser/generated/service/models/mappers.d.ts.map +1 -1
- package/dist/browser/generated/service/models/mappers.js +1579 -2957
- package/dist/browser/generated/service/models/mappers.js.map +1 -1
- package/dist/browser/generated/service/models/parameters.d.ts +2 -14
- package/dist/browser/generated/service/models/parameters.d.ts.map +1 -1
- package/dist/browser/generated/service/models/parameters.js +7 -89
- package/dist/browser/generated/service/models/parameters.js.map +1 -1
- package/dist/browser/generated/service/operations/aliases.d.ts +1 -43
- package/dist/browser/generated/service/operations/aliases.d.ts.map +1 -1
- package/dist/browser/generated/service/operations/aliases.js +200 -160
- package/dist/browser/generated/service/operations/aliases.js.map +1 -1
- package/dist/browser/generated/service/operations/dataSources.js +1 -4
- package/dist/browser/generated/service/operations/dataSources.js.map +1 -1
- package/dist/browser/generated/service/operations/index.d.ts +0 -3
- package/dist/browser/generated/service/operations/index.d.ts.map +1 -1
- package/dist/browser/generated/service/operations/index.js +0 -3
- package/dist/browser/generated/service/operations/index.js.map +1 -1
- package/dist/browser/generated/service/operations/indexers.d.ts +1 -14
- package/dist/browser/generated/service/operations/indexers.d.ts.map +1 -1
- package/dist/browser/generated/service/operations/indexers.js +1 -54
- package/dist/browser/generated/service/operations/indexers.js.map +1 -1
- package/dist/browser/generated/service/operations/skillsets.d.ts +1 -8
- package/dist/browser/generated/service/operations/skillsets.d.ts.map +1 -1
- package/dist/browser/generated/service/operations/skillsets.js +1 -30
- package/dist/browser/generated/service/operations/skillsets.js.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/aliases.d.ts +1 -35
- package/dist/browser/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/aliases.js +66 -7
- package/dist/browser/generated/service/operationsInterfaces/aliases.js.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/index.d.ts +0 -3
- package/dist/browser/generated/service/operationsInterfaces/index.d.ts.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/index.js +0 -3
- package/dist/browser/generated/service/operationsInterfaces/index.js.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/indexers.d.ts +1 -14
- package/dist/browser/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/indexers.js.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/skillsets.d.ts +1 -8
- package/dist/browser/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
- package/dist/browser/generated/service/operationsInterfaces/skillsets.js.map +1 -1
- package/dist/browser/generated/service/searchServiceClient.d.ts +4 -12
- package/dist/browser/generated/service/searchServiceClient.d.ts.map +1 -1
- package/dist/browser/generated/service/searchServiceClient.js +2 -31
- package/dist/browser/generated/service/searchServiceClient.js.map +1 -1
- package/dist/browser/index.d.ts +4 -7
- package/dist/browser/index.d.ts.map +1 -1
- package/dist/browser/index.js +3 -5
- package/dist/browser/index.js.map +1 -1
- package/dist/browser/indexDocumentsBatch.d.ts +5 -6
- package/dist/browser/indexDocumentsBatch.d.ts.map +1 -1
- package/dist/browser/indexDocumentsBatch.js +4 -5
- package/dist/browser/indexDocumentsBatch.js.map +1 -1
- package/dist/browser/indexModels.d.ts +30 -236
- package/dist/browser/indexModels.d.ts.map +1 -1
- package/dist/browser/indexModels.js.map +1 -1
- package/dist/browser/odata.d.ts +2 -5
- package/dist/browser/odata.d.ts.map +1 -1
- package/dist/browser/odata.js +2 -5
- package/dist/browser/odata.js.map +1 -1
- package/dist/browser/odataMetadataPolicy.d.ts +1 -1
- package/dist/browser/odataMetadataPolicy.d.ts.map +1 -1
- package/dist/browser/odataMetadataPolicy.js.map +1 -1
- package/dist/browser/searchApiKeyCredentialPolicy.d.ts +4 -4
- package/dist/browser/searchApiKeyCredentialPolicy.d.ts.map +1 -1
- package/dist/browser/searchApiKeyCredentialPolicy.js +2 -2
- package/dist/browser/searchApiKeyCredentialPolicy.js.map +1 -1
- package/dist/browser/searchClient.d.ts +53 -64
- package/dist/browser/searchClient.d.ts.map +1 -1
- package/dist/browser/searchClient.js +53 -113
- package/dist/browser/searchClient.js.map +1 -1
- package/dist/browser/searchIndexClient.d.ts +18 -151
- package/dist/browser/searchIndexClient.d.ts.map +1 -1
- package/dist/browser/searchIndexClient.js +17 -458
- package/dist/browser/searchIndexClient.js.map +1 -1
- package/dist/browser/searchIndexerClient.d.ts +11 -26
- package/dist/browser/searchIndexerClient.d.ts.map +1 -1
- package/dist/browser/searchIndexerClient.js +7 -62
- package/dist/browser/searchIndexerClient.js.map +1 -1
- package/dist/browser/searchIndexingBufferedSender.d.ts +3 -3
- package/dist/browser/searchIndexingBufferedSender.d.ts.map +1 -1
- package/dist/browser/searchIndexingBufferedSender.js +1 -1
- package/dist/browser/searchIndexingBufferedSender.js.map +1 -1
- package/dist/browser/serviceModels.d.ts +213 -1119
- package/dist/browser/serviceModels.d.ts.map +1 -1
- package/dist/browser/serviceModels.js +7 -8
- package/dist/browser/serviceModels.js.map +1 -1
- package/dist/browser/serviceUtils.d.ts +6 -12
- package/dist/browser/serviceUtils.d.ts.map +1 -1
- package/dist/browser/serviceUtils.js +45 -226
- package/dist/browser/serviceUtils.js.map +1 -1
- package/dist/browser/synonymMapHelper-browser.d.mts.map +1 -1
- package/dist/browser/synonymMapHelper-browser.mjs.map +1 -1
- package/dist/browser/synonymMapHelper.d.ts +1 -1
- package/dist/commonjs/errorModels.d.ts +17 -23
- package/dist/commonjs/errorModels.d.ts.map +1 -1
- package/dist/commonjs/errorModels.js.map +1 -1
- package/dist/commonjs/generated/data/models/index.d.ts +20 -567
- package/dist/commonjs/generated/data/models/index.d.ts.map +1 -1
- package/dist/commonjs/generated/data/models/index.js +7 -217
- package/dist/commonjs/generated/data/models/index.js.map +1 -1
- package/dist/commonjs/generated/data/models/mappers.d.ts +0 -18
- package/dist/commonjs/generated/data/models/mappers.d.ts.map +1 -1
- package/dist/commonjs/generated/data/models/mappers.js +5 -436
- package/dist/commonjs/generated/data/models/mappers.js.map +1 -1
- package/dist/commonjs/generated/data/models/parameters.d.ts +0 -5
- package/dist/commonjs/generated/data/models/parameters.d.ts.map +1 -1
- package/dist/commonjs/generated/data/models/parameters.js +2 -53
- package/dist/commonjs/generated/data/models/parameters.js.map +1 -1
- package/dist/commonjs/generated/data/operations/documents.js +3 -11
- package/dist/commonjs/generated/data/operations/documents.js.map +1 -1
- package/dist/commonjs/generated/data/searchClient.d.ts +3 -3
- package/dist/commonjs/generated/data/searchClient.d.ts.map +1 -1
- package/dist/commonjs/generated/data/searchClient.js +1 -1
- package/dist/commonjs/generated/data/searchClient.js.map +1 -1
- package/dist/commonjs/generated/service/models/index.d.ts +130 -1023
- package/dist/commonjs/generated/service/models/index.d.ts.map +1 -1
- package/dist/commonjs/generated/service/models/index.js +25 -198
- package/dist/commonjs/generated/service/models/index.js.map +1 -1
- package/dist/commonjs/generated/service/models/mappers.d.ts +17 -67
- package/dist/commonjs/generated/service/models/mappers.d.ts.map +1 -1
- package/dist/commonjs/generated/service/models/mappers.js +1583 -2962
- package/dist/commonjs/generated/service/models/mappers.js.map +1 -1
- package/dist/commonjs/generated/service/models/parameters.d.ts +2 -14
- package/dist/commonjs/generated/service/models/parameters.d.ts.map +1 -1
- package/dist/commonjs/generated/service/models/parameters.js +7 -89
- package/dist/commonjs/generated/service/models/parameters.js.map +1 -1
- package/dist/commonjs/generated/service/operations/aliases.d.ts +1 -43
- package/dist/commonjs/generated/service/operations/aliases.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operations/aliases.js +199 -163
- package/dist/commonjs/generated/service/operations/aliases.js.map +1 -1
- package/dist/commonjs/generated/service/operations/dataSources.js +1 -4
- package/dist/commonjs/generated/service/operations/dataSources.js.map +1 -1
- package/dist/commonjs/generated/service/operations/index.d.ts +0 -3
- package/dist/commonjs/generated/service/operations/index.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operations/index.js +0 -3
- package/dist/commonjs/generated/service/operations/index.js.map +1 -1
- package/dist/commonjs/generated/service/operations/indexers.d.ts +1 -14
- package/dist/commonjs/generated/service/operations/indexers.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operations/indexers.js +1 -54
- package/dist/commonjs/generated/service/operations/indexers.js.map +1 -1
- package/dist/commonjs/generated/service/operations/skillsets.d.ts +1 -8
- package/dist/commonjs/generated/service/operations/skillsets.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operations/skillsets.js +1 -30
- package/dist/commonjs/generated/service/operations/skillsets.js.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.d.ts +1 -35
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.js +66 -7
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.js.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/index.d.ts +0 -3
- package/dist/commonjs/generated/service/operationsInterfaces/index.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/index.js +0 -3
- package/dist/commonjs/generated/service/operationsInterfaces/index.js.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/indexers.d.ts +1 -14
- package/dist/commonjs/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/indexers.js.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/skillsets.d.ts +1 -8
- package/dist/commonjs/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
- package/dist/commonjs/generated/service/operationsInterfaces/skillsets.js.map +1 -1
- package/dist/commonjs/generated/service/searchServiceClient.d.ts +4 -12
- package/dist/commonjs/generated/service/searchServiceClient.d.ts.map +1 -1
- package/dist/commonjs/generated/service/searchServiceClient.js +1 -30
- package/dist/commonjs/generated/service/searchServiceClient.js.map +1 -1
- package/dist/commonjs/index.d.ts +4 -7
- package/dist/commonjs/index.d.ts.map +1 -1
- package/dist/commonjs/index.js +44 -72
- package/dist/commonjs/index.js.map +1 -1
- package/dist/commonjs/indexDocumentsBatch.d.ts +5 -6
- package/dist/commonjs/indexDocumentsBatch.d.ts.map +1 -1
- package/dist/commonjs/indexDocumentsBatch.js +4 -5
- package/dist/commonjs/indexDocumentsBatch.js.map +1 -1
- package/dist/commonjs/indexModels.d.ts +30 -236
- package/dist/commonjs/indexModels.d.ts.map +1 -1
- package/dist/commonjs/indexModels.js.map +1 -1
- package/dist/commonjs/odata.d.ts +2 -5
- package/dist/commonjs/odata.d.ts.map +1 -1
- package/dist/commonjs/odata.js +2 -5
- package/dist/commonjs/odata.js.map +1 -1
- package/dist/commonjs/odataMetadataPolicy.d.ts +1 -1
- package/dist/commonjs/odataMetadataPolicy.d.ts.map +1 -1
- package/dist/commonjs/odataMetadataPolicy.js.map +1 -1
- package/dist/commonjs/searchApiKeyCredentialPolicy.d.ts +4 -4
- package/dist/commonjs/searchApiKeyCredentialPolicy.d.ts.map +1 -1
- package/dist/commonjs/searchApiKeyCredentialPolicy.js +2 -2
- package/dist/commonjs/searchApiKeyCredentialPolicy.js.map +1 -1
- package/dist/commonjs/searchClient.d.ts +53 -64
- package/dist/commonjs/searchClient.d.ts.map +1 -1
- package/dist/commonjs/searchClient.js +53 -113
- package/dist/commonjs/searchClient.js.map +1 -1
- package/dist/commonjs/searchIndexClient.d.ts +18 -151
- package/dist/commonjs/searchIndexClient.d.ts.map +1 -1
- package/dist/commonjs/searchIndexClient.js +17 -458
- package/dist/commonjs/searchIndexClient.js.map +1 -1
- package/dist/commonjs/searchIndexerClient.d.ts +11 -26
- package/dist/commonjs/searchIndexerClient.d.ts.map +1 -1
- package/dist/commonjs/searchIndexerClient.js +7 -62
- package/dist/commonjs/searchIndexerClient.js.map +1 -1
- package/dist/commonjs/searchIndexingBufferedSender.d.ts +3 -3
- package/dist/commonjs/searchIndexingBufferedSender.d.ts.map +1 -1
- package/dist/commonjs/searchIndexingBufferedSender.js +2 -2
- package/dist/commonjs/searchIndexingBufferedSender.js.map +1 -1
- package/dist/commonjs/serviceModels.d.ts +213 -1119
- package/dist/commonjs/serviceModels.d.ts.map +1 -1
- package/dist/commonjs/serviceModels.js +7 -8
- package/dist/commonjs/serviceModels.js.map +1 -1
- package/dist/commonjs/serviceUtils.d.ts +6 -12
- package/dist/commonjs/serviceUtils.d.ts.map +1 -1
- package/dist/commonjs/serviceUtils.js +44 -230
- package/dist/commonjs/serviceUtils.js.map +1 -1
- package/dist/commonjs/synonymMapHelper.d.ts +1 -1
- package/dist/commonjs/synonymMapHelper.d.ts.map +1 -1
- package/dist/commonjs/synonymMapHelper.js +3 -3
- package/dist/commonjs/synonymMapHelper.js.map +1 -1
- package/dist/esm/errorModels.d.ts +17 -23
- package/dist/esm/errorModels.d.ts.map +1 -1
- package/dist/esm/errorModels.js.map +1 -1
- package/dist/esm/generated/data/models/index.d.ts +20 -567
- package/dist/esm/generated/data/models/index.d.ts.map +1 -1
- package/dist/esm/generated/data/models/index.js +6 -216
- package/dist/esm/generated/data/models/index.js.map +1 -1
- package/dist/esm/generated/data/models/mappers.d.ts +0 -18
- package/dist/esm/generated/data/models/mappers.d.ts.map +1 -1
- package/dist/esm/generated/data/models/mappers.js +4 -435
- package/dist/esm/generated/data/models/mappers.js.map +1 -1
- package/dist/esm/generated/data/models/parameters.d.ts +0 -5
- package/dist/esm/generated/data/models/parameters.d.ts.map +1 -1
- package/dist/esm/generated/data/models/parameters.js +0 -51
- package/dist/esm/generated/data/models/parameters.js.map +1 -1
- package/dist/esm/generated/data/operations/documents.js +3 -11
- package/dist/esm/generated/data/operations/documents.js.map +1 -1
- package/dist/esm/generated/data/searchClient.d.ts +3 -3
- package/dist/esm/generated/data/searchClient.d.ts.map +1 -1
- package/dist/esm/generated/data/searchClient.js +1 -1
- package/dist/esm/generated/data/searchClient.js.map +1 -1
- package/dist/esm/generated/service/models/index.d.ts +130 -1023
- package/dist/esm/generated/service/models/index.d.ts.map +1 -1
- package/dist/esm/generated/service/models/index.js +24 -196
- package/dist/esm/generated/service/models/index.js.map +1 -1
- package/dist/esm/generated/service/models/mappers.d.ts +17 -67
- package/dist/esm/generated/service/models/mappers.d.ts.map +1 -1
- package/dist/esm/generated/service/models/mappers.js +1579 -2957
- package/dist/esm/generated/service/models/mappers.js.map +1 -1
- package/dist/esm/generated/service/models/parameters.d.ts +2 -14
- package/dist/esm/generated/service/models/parameters.d.ts.map +1 -1
- package/dist/esm/generated/service/models/parameters.js +7 -89
- package/dist/esm/generated/service/models/parameters.js.map +1 -1
- package/dist/esm/generated/service/operations/aliases.d.ts +1 -43
- package/dist/esm/generated/service/operations/aliases.d.ts.map +1 -1
- package/dist/esm/generated/service/operations/aliases.js +200 -160
- package/dist/esm/generated/service/operations/aliases.js.map +1 -1
- package/dist/esm/generated/service/operations/dataSources.js +1 -4
- package/dist/esm/generated/service/operations/dataSources.js.map +1 -1
- package/dist/esm/generated/service/operations/index.d.ts +0 -3
- package/dist/esm/generated/service/operations/index.d.ts.map +1 -1
- package/dist/esm/generated/service/operations/index.js +0 -3
- package/dist/esm/generated/service/operations/index.js.map +1 -1
- package/dist/esm/generated/service/operations/indexers.d.ts +1 -14
- package/dist/esm/generated/service/operations/indexers.d.ts.map +1 -1
- package/dist/esm/generated/service/operations/indexers.js +1 -54
- package/dist/esm/generated/service/operations/indexers.js.map +1 -1
- package/dist/esm/generated/service/operations/skillsets.d.ts +1 -8
- package/dist/esm/generated/service/operations/skillsets.d.ts.map +1 -1
- package/dist/esm/generated/service/operations/skillsets.js +1 -30
- package/dist/esm/generated/service/operations/skillsets.js.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/aliases.d.ts +1 -35
- package/dist/esm/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/aliases.js +66 -7
- package/dist/esm/generated/service/operationsInterfaces/aliases.js.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/index.d.ts +0 -3
- package/dist/esm/generated/service/operationsInterfaces/index.d.ts.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/index.js +0 -3
- package/dist/esm/generated/service/operationsInterfaces/index.js.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/indexers.d.ts +1 -14
- package/dist/esm/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/indexers.js.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/skillsets.d.ts +1 -8
- package/dist/esm/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
- package/dist/esm/generated/service/operationsInterfaces/skillsets.js.map +1 -1
- package/dist/esm/generated/service/searchServiceClient.d.ts +4 -12
- package/dist/esm/generated/service/searchServiceClient.d.ts.map +1 -1
- package/dist/esm/generated/service/searchServiceClient.js +2 -31
- package/dist/esm/generated/service/searchServiceClient.js.map +1 -1
- package/dist/esm/index.d.ts +4 -7
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +3 -5
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/indexDocumentsBatch.d.ts +5 -6
- package/dist/esm/indexDocumentsBatch.d.ts.map +1 -1
- package/dist/esm/indexDocumentsBatch.js +4 -5
- package/dist/esm/indexDocumentsBatch.js.map +1 -1
- package/dist/esm/indexModels.d.ts +30 -236
- package/dist/esm/indexModels.d.ts.map +1 -1
- package/dist/esm/indexModels.js.map +1 -1
- package/dist/esm/odata.d.ts +2 -5
- package/dist/esm/odata.d.ts.map +1 -1
- package/dist/esm/odata.js +2 -5
- package/dist/esm/odata.js.map +1 -1
- package/dist/esm/odataMetadataPolicy.d.ts +1 -1
- package/dist/esm/odataMetadataPolicy.d.ts.map +1 -1
- package/dist/esm/odataMetadataPolicy.js.map +1 -1
- package/dist/esm/searchApiKeyCredentialPolicy.d.ts +4 -4
- package/dist/esm/searchApiKeyCredentialPolicy.d.ts.map +1 -1
- package/dist/esm/searchApiKeyCredentialPolicy.js +2 -2
- package/dist/esm/searchApiKeyCredentialPolicy.js.map +1 -1
- package/dist/esm/searchClient.d.ts +53 -64
- package/dist/esm/searchClient.d.ts.map +1 -1
- package/dist/esm/searchClient.js +53 -113
- package/dist/esm/searchClient.js.map +1 -1
- package/dist/esm/searchIndexClient.d.ts +18 -151
- package/dist/esm/searchIndexClient.d.ts.map +1 -1
- package/dist/esm/searchIndexClient.js +17 -458
- package/dist/esm/searchIndexClient.js.map +1 -1
- package/dist/esm/searchIndexerClient.d.ts +11 -26
- package/dist/esm/searchIndexerClient.d.ts.map +1 -1
- package/dist/esm/searchIndexerClient.js +7 -62
- package/dist/esm/searchIndexerClient.js.map +1 -1
- package/dist/esm/searchIndexingBufferedSender.d.ts +3 -3
- package/dist/esm/searchIndexingBufferedSender.d.ts.map +1 -1
- package/dist/esm/searchIndexingBufferedSender.js +1 -1
- package/dist/esm/searchIndexingBufferedSender.js.map +1 -1
- package/dist/esm/serviceModels.d.ts +213 -1119
- package/dist/esm/serviceModels.d.ts.map +1 -1
- package/dist/esm/serviceModels.js +7 -8
- package/dist/esm/serviceModels.js.map +1 -1
- package/dist/esm/serviceUtils.d.ts +6 -12
- package/dist/esm/serviceUtils.d.ts.map +1 -1
- package/dist/esm/serviceUtils.js +45 -226
- package/dist/esm/serviceUtils.js.map +1 -1
- package/dist/esm/synonymMapHelper.d.ts +1 -1
- package/dist/esm/synonymMapHelper.d.ts.map +1 -1
- package/dist/esm/synonymMapHelper.js +2 -2
- package/dist/esm/synonymMapHelper.js.map +1 -1
- package/dist/react-native/errorModels.d.ts +17 -23
- package/dist/react-native/errorModels.d.ts.map +1 -1
- package/dist/react-native/errorModels.js.map +1 -1
- package/dist/react-native/generated/data/models/index.d.ts +20 -567
- package/dist/react-native/generated/data/models/index.d.ts.map +1 -1
- package/dist/react-native/generated/data/models/index.js +6 -216
- package/dist/react-native/generated/data/models/index.js.map +1 -1
- package/dist/react-native/generated/data/models/mappers.d.ts +0 -18
- package/dist/react-native/generated/data/models/mappers.d.ts.map +1 -1
- package/dist/react-native/generated/data/models/mappers.js +4 -435
- package/dist/react-native/generated/data/models/mappers.js.map +1 -1
- package/dist/react-native/generated/data/models/parameters.d.ts +0 -5
- package/dist/react-native/generated/data/models/parameters.d.ts.map +1 -1
- package/dist/react-native/generated/data/models/parameters.js +0 -51
- package/dist/react-native/generated/data/models/parameters.js.map +1 -1
- package/dist/react-native/generated/data/operations/documents.js +3 -11
- package/dist/react-native/generated/data/operations/documents.js.map +1 -1
- package/dist/react-native/generated/data/searchClient.d.ts +3 -3
- package/dist/react-native/generated/data/searchClient.d.ts.map +1 -1
- package/dist/react-native/generated/data/searchClient.js +1 -1
- package/dist/react-native/generated/data/searchClient.js.map +1 -1
- package/dist/react-native/generated/service/models/index.d.ts +130 -1023
- package/dist/react-native/generated/service/models/index.d.ts.map +1 -1
- package/dist/react-native/generated/service/models/index.js +24 -196
- package/dist/react-native/generated/service/models/index.js.map +1 -1
- package/dist/react-native/generated/service/models/mappers.d.ts +17 -67
- package/dist/react-native/generated/service/models/mappers.d.ts.map +1 -1
- package/dist/react-native/generated/service/models/mappers.js +1579 -2957
- package/dist/react-native/generated/service/models/mappers.js.map +1 -1
- package/dist/react-native/generated/service/models/parameters.d.ts +2 -14
- package/dist/react-native/generated/service/models/parameters.d.ts.map +1 -1
- package/dist/react-native/generated/service/models/parameters.js +7 -89
- package/dist/react-native/generated/service/models/parameters.js.map +1 -1
- package/dist/react-native/generated/service/operations/aliases.d.ts +1 -43
- package/dist/react-native/generated/service/operations/aliases.d.ts.map +1 -1
- package/dist/react-native/generated/service/operations/aliases.js +200 -160
- package/dist/react-native/generated/service/operations/aliases.js.map +1 -1
- package/dist/react-native/generated/service/operations/dataSources.js +1 -4
- package/dist/react-native/generated/service/operations/dataSources.js.map +1 -1
- package/dist/react-native/generated/service/operations/index.d.ts +0 -3
- package/dist/react-native/generated/service/operations/index.d.ts.map +1 -1
- package/dist/react-native/generated/service/operations/index.js +0 -3
- package/dist/react-native/generated/service/operations/index.js.map +1 -1
- package/dist/react-native/generated/service/operations/indexers.d.ts +1 -14
- package/dist/react-native/generated/service/operations/indexers.d.ts.map +1 -1
- package/dist/react-native/generated/service/operations/indexers.js +1 -54
- package/dist/react-native/generated/service/operations/indexers.js.map +1 -1
- package/dist/react-native/generated/service/operations/skillsets.d.ts +1 -8
- package/dist/react-native/generated/service/operations/skillsets.d.ts.map +1 -1
- package/dist/react-native/generated/service/operations/skillsets.js +1 -30
- package/dist/react-native/generated/service/operations/skillsets.js.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/aliases.d.ts +1 -35
- package/dist/react-native/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/aliases.js +66 -7
- package/dist/react-native/generated/service/operationsInterfaces/aliases.js.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/index.d.ts +0 -3
- package/dist/react-native/generated/service/operationsInterfaces/index.d.ts.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/index.js +0 -3
- package/dist/react-native/generated/service/operationsInterfaces/index.js.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/indexers.d.ts +1 -14
- package/dist/react-native/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/indexers.js.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/skillsets.d.ts +1 -8
- package/dist/react-native/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
- package/dist/react-native/generated/service/operationsInterfaces/skillsets.js.map +1 -1
- package/dist/react-native/generated/service/searchServiceClient.d.ts +4 -12
- package/dist/react-native/generated/service/searchServiceClient.d.ts.map +1 -1
- package/dist/react-native/generated/service/searchServiceClient.js +2 -31
- package/dist/react-native/generated/service/searchServiceClient.js.map +1 -1
- package/dist/react-native/index.d.ts +4 -7
- package/dist/react-native/index.d.ts.map +1 -1
- package/dist/react-native/index.js +3 -5
- package/dist/react-native/index.js.map +1 -1
- package/dist/react-native/indexDocumentsBatch.d.ts +5 -6
- package/dist/react-native/indexDocumentsBatch.d.ts.map +1 -1
- package/dist/react-native/indexDocumentsBatch.js +4 -5
- package/dist/react-native/indexDocumentsBatch.js.map +1 -1
- package/dist/react-native/indexModels.d.ts +30 -236
- package/dist/react-native/indexModels.d.ts.map +1 -1
- package/dist/react-native/indexModels.js.map +1 -1
- package/dist/react-native/odata.d.ts +2 -5
- package/dist/react-native/odata.d.ts.map +1 -1
- package/dist/react-native/odata.js +2 -5
- package/dist/react-native/odata.js.map +1 -1
- package/dist/react-native/odataMetadataPolicy.d.ts +1 -1
- package/dist/react-native/odataMetadataPolicy.d.ts.map +1 -1
- package/dist/react-native/odataMetadataPolicy.js.map +1 -1
- package/dist/react-native/searchApiKeyCredentialPolicy.d.ts +4 -4
- package/dist/react-native/searchApiKeyCredentialPolicy.d.ts.map +1 -1
- package/dist/react-native/searchApiKeyCredentialPolicy.js +2 -2
- package/dist/react-native/searchApiKeyCredentialPolicy.js.map +1 -1
- package/dist/react-native/searchClient.d.ts +53 -64
- package/dist/react-native/searchClient.d.ts.map +1 -1
- package/dist/react-native/searchClient.js +53 -113
- package/dist/react-native/searchClient.js.map +1 -1
- package/dist/react-native/searchIndexClient.d.ts +18 -151
- package/dist/react-native/searchIndexClient.d.ts.map +1 -1
- package/dist/react-native/searchIndexClient.js +17 -458
- package/dist/react-native/searchIndexClient.js.map +1 -1
- package/dist/react-native/searchIndexerClient.d.ts +11 -26
- package/dist/react-native/searchIndexerClient.d.ts.map +1 -1
- package/dist/react-native/searchIndexerClient.js +7 -62
- package/dist/react-native/searchIndexerClient.js.map +1 -1
- package/dist/react-native/searchIndexingBufferedSender.d.ts +3 -3
- package/dist/react-native/searchIndexingBufferedSender.d.ts.map +1 -1
- package/dist/react-native/searchIndexingBufferedSender.js +1 -1
- package/dist/react-native/searchIndexingBufferedSender.js.map +1 -1
- package/dist/react-native/serviceModels.d.ts +213 -1119
- package/dist/react-native/serviceModels.d.ts.map +1 -1
- package/dist/react-native/serviceModels.js +7 -8
- package/dist/react-native/serviceModels.js.map +1 -1
- package/dist/react-native/serviceUtils.d.ts +6 -12
- package/dist/react-native/serviceUtils.d.ts.map +1 -1
- package/dist/react-native/serviceUtils.js +45 -226
- package/dist/react-native/serviceUtils.js.map +1 -1
- package/dist/react-native/synonymMapHelper.d.ts +1 -1
- package/dist/react-native/synonymMapHelper.d.ts.map +1 -1
- package/dist/react-native/synonymMapHelper.js +2 -2
- package/dist/react-native/synonymMapHelper.js.map +1 -1
- package/package.json +9 -9
- package/dist/browser/generated/knowledgeAgent/index.d.ts +0 -4
- package/dist/browser/generated/knowledgeAgent/index.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/index.js +0 -11
- package/dist/browser/generated/knowledgeAgent/index.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/models/index.d.ts +0 -264
- package/dist/browser/generated/knowledgeAgent/models/index.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/models/index.js +0 -30
- package/dist/browser/generated/knowledgeAgent/models/index.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/models/mappers.d.ts +0 -43
- package/dist/browser/generated/knowledgeAgent/models/mappers.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/models/mappers.js +0 -627
- package/dist/browser/generated/knowledgeAgent/models/mappers.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/models/parameters.d.ts +0 -9
- package/dist/browser/generated/knowledgeAgent/models/parameters.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/models/parameters.js +0 -75
- package/dist/browser/generated/knowledgeAgent/models/parameters.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operations/index.d.ts +0 -2
- package/dist/browser/generated/knowledgeAgent/operations/index.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operations/index.js +0 -9
- package/dist/browser/generated/knowledgeAgent/operations/index.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts +0 -19
- package/dist/browser/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operations/knowledgeRetrieval.js +0 -54
- package/dist/browser/generated/knowledgeAgent/operations/knowledgeRetrieval.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/index.d.ts +0 -2
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/index.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/index.js +0 -9
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/index.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts +0 -11
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js +0 -9
- package/dist/browser/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js.map +0 -1
- package/dist/browser/generated/knowledgeAgent/searchClient.d.ts +0 -21
- package/dist/browser/generated/knowledgeAgent/searchClient.d.ts.map +0 -1
- package/dist/browser/generated/knowledgeAgent/searchClient.js +0 -88
- package/dist/browser/generated/knowledgeAgent/searchClient.js.map +0 -1
- package/dist/browser/generated/service/operations/knowledgeAgents.d.ts +0 -43
- package/dist/browser/generated/service/operations/knowledgeAgents.d.ts.map +0 -1
- package/dist/browser/generated/service/operations/knowledgeAgents.js +0 -160
- package/dist/browser/generated/service/operations/knowledgeAgents.js.map +0 -1
- package/dist/browser/generated/service/operations/knowledgeSources.d.ts +0 -43
- package/dist/browser/generated/service/operations/knowledgeSources.d.ts.map +0 -1
- package/dist/browser/generated/service/operations/knowledgeSources.js +0 -160
- package/dist/browser/generated/service/operations/knowledgeSources.js.map +0 -1
- package/dist/browser/generated/service/operationsInterfaces/knowledgeAgents.d.ts +0 -35
- package/dist/browser/generated/service/operationsInterfaces/knowledgeAgents.d.ts.map +0 -1
- package/dist/browser/generated/service/operationsInterfaces/knowledgeAgents.js +0 -9
- package/dist/browser/generated/service/operationsInterfaces/knowledgeAgents.js.map +0 -1
- package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.d.ts +0 -35
- package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +0 -1
- package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.js +0 -9
- package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.js.map +0 -1
- package/dist/browser/knowledgeAgentModels.d.ts +0 -43
- package/dist/browser/knowledgeAgentModels.d.ts.map +0 -1
- package/dist/browser/knowledgeAgentModels.js +0 -4
- package/dist/browser/knowledgeAgentModels.js.map +0 -1
- package/dist/browser/knowledgeRetrievalClient.d.ts +0 -68
- package/dist/browser/knowledgeRetrievalClient.d.ts.map +0 -1
- package/dist/browser/knowledgeRetrievalClient.js +0 -108
- package/dist/browser/knowledgeRetrievalClient.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/index.d.ts +0 -4
- package/dist/commonjs/generated/knowledgeAgent/index.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/index.js +0 -16
- package/dist/commonjs/generated/knowledgeAgent/index.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/models/index.d.ts +0 -264
- package/dist/commonjs/generated/knowledgeAgent/models/index.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/models/index.js +0 -33
- package/dist/commonjs/generated/knowledgeAgent/models/index.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/models/mappers.d.ts +0 -43
- package/dist/commonjs/generated/knowledgeAgent/models/mappers.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/models/mappers.js +0 -630
- package/dist/commonjs/generated/knowledgeAgent/models/mappers.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/models/parameters.d.ts +0 -9
- package/dist/commonjs/generated/knowledgeAgent/models/parameters.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/models/parameters.js +0 -78
- package/dist/commonjs/generated/knowledgeAgent/models/parameters.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operations/index.d.ts +0 -2
- package/dist/commonjs/generated/knowledgeAgent/operations/index.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operations/index.js +0 -12
- package/dist/commonjs/generated/knowledgeAgent/operations/index.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts +0 -19
- package/dist/commonjs/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operations/knowledgeRetrieval.js +0 -59
- package/dist/commonjs/generated/knowledgeAgent/operations/knowledgeRetrieval.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/index.d.ts +0 -2
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/index.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/index.js +0 -12
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/index.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts +0 -11
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js +0 -10
- package/dist/commonjs/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/searchClient.d.ts +0 -21
- package/dist/commonjs/generated/knowledgeAgent/searchClient.d.ts.map +0 -1
- package/dist/commonjs/generated/knowledgeAgent/searchClient.js +0 -93
- package/dist/commonjs/generated/knowledgeAgent/searchClient.js.map +0 -1
- package/dist/commonjs/generated/service/operations/knowledgeAgents.d.ts +0 -43
- package/dist/commonjs/generated/service/operations/knowledgeAgents.d.ts.map +0 -1
- package/dist/commonjs/generated/service/operations/knowledgeAgents.js +0 -165
- package/dist/commonjs/generated/service/operations/knowledgeAgents.js.map +0 -1
- package/dist/commonjs/generated/service/operations/knowledgeSources.d.ts +0 -43
- package/dist/commonjs/generated/service/operations/knowledgeSources.d.ts.map +0 -1
- package/dist/commonjs/generated/service/operations/knowledgeSources.js +0 -165
- package/dist/commonjs/generated/service/operations/knowledgeSources.js.map +0 -1
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeAgents.d.ts +0 -35
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeAgents.d.ts.map +0 -1
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeAgents.js +0 -10
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeAgents.js.map +0 -1
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.d.ts +0 -35
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +0 -1
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.js +0 -10
- package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.js.map +0 -1
- package/dist/commonjs/knowledgeAgentModels.d.ts +0 -43
- package/dist/commonjs/knowledgeAgentModels.d.ts.map +0 -1
- package/dist/commonjs/knowledgeAgentModels.js +0 -5
- package/dist/commonjs/knowledgeAgentModels.js.map +0 -1
- package/dist/commonjs/knowledgeRetrievalClient.d.ts +0 -68
- package/dist/commonjs/knowledgeRetrievalClient.d.ts.map +0 -1
- package/dist/commonjs/knowledgeRetrievalClient.js +0 -113
- package/dist/commonjs/knowledgeRetrievalClient.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/index.d.ts +0 -4
- package/dist/esm/generated/knowledgeAgent/index.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/index.js +0 -11
- package/dist/esm/generated/knowledgeAgent/index.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/models/index.d.ts +0 -264
- package/dist/esm/generated/knowledgeAgent/models/index.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/models/index.js +0 -30
- package/dist/esm/generated/knowledgeAgent/models/index.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/models/mappers.d.ts +0 -43
- package/dist/esm/generated/knowledgeAgent/models/mappers.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/models/mappers.js +0 -627
- package/dist/esm/generated/knowledgeAgent/models/mappers.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/models/parameters.d.ts +0 -9
- package/dist/esm/generated/knowledgeAgent/models/parameters.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/models/parameters.js +0 -75
- package/dist/esm/generated/knowledgeAgent/models/parameters.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operations/index.d.ts +0 -2
- package/dist/esm/generated/knowledgeAgent/operations/index.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operations/index.js +0 -9
- package/dist/esm/generated/knowledgeAgent/operations/index.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts +0 -19
- package/dist/esm/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operations/knowledgeRetrieval.js +0 -54
- package/dist/esm/generated/knowledgeAgent/operations/knowledgeRetrieval.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/index.d.ts +0 -2
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/index.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/index.js +0 -9
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/index.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts +0 -11
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js +0 -9
- package/dist/esm/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js.map +0 -1
- package/dist/esm/generated/knowledgeAgent/searchClient.d.ts +0 -21
- package/dist/esm/generated/knowledgeAgent/searchClient.d.ts.map +0 -1
- package/dist/esm/generated/knowledgeAgent/searchClient.js +0 -88
- package/dist/esm/generated/knowledgeAgent/searchClient.js.map +0 -1
- package/dist/esm/generated/service/operations/knowledgeAgents.d.ts +0 -43
- package/dist/esm/generated/service/operations/knowledgeAgents.d.ts.map +0 -1
- package/dist/esm/generated/service/operations/knowledgeAgents.js +0 -160
- package/dist/esm/generated/service/operations/knowledgeAgents.js.map +0 -1
- package/dist/esm/generated/service/operations/knowledgeSources.d.ts +0 -43
- package/dist/esm/generated/service/operations/knowledgeSources.d.ts.map +0 -1
- package/dist/esm/generated/service/operations/knowledgeSources.js +0 -160
- package/dist/esm/generated/service/operations/knowledgeSources.js.map +0 -1
- package/dist/esm/generated/service/operationsInterfaces/knowledgeAgents.d.ts +0 -35
- package/dist/esm/generated/service/operationsInterfaces/knowledgeAgents.d.ts.map +0 -1
- package/dist/esm/generated/service/operationsInterfaces/knowledgeAgents.js +0 -9
- package/dist/esm/generated/service/operationsInterfaces/knowledgeAgents.js.map +0 -1
- package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.d.ts +0 -35
- package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +0 -1
- package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.js +0 -9
- package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.js.map +0 -1
- package/dist/esm/knowledgeAgentModels.d.ts +0 -43
- package/dist/esm/knowledgeAgentModels.d.ts.map +0 -1
- package/dist/esm/knowledgeAgentModels.js +0 -4
- package/dist/esm/knowledgeAgentModels.js.map +0 -1
- package/dist/esm/knowledgeRetrievalClient.d.ts +0 -68
- package/dist/esm/knowledgeRetrievalClient.d.ts.map +0 -1
- package/dist/esm/knowledgeRetrievalClient.js +0 -108
- package/dist/esm/knowledgeRetrievalClient.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/index.d.ts +0 -4
- package/dist/react-native/generated/knowledgeAgent/index.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/index.js +0 -11
- package/dist/react-native/generated/knowledgeAgent/index.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/models/index.d.ts +0 -264
- package/dist/react-native/generated/knowledgeAgent/models/index.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/models/index.js +0 -30
- package/dist/react-native/generated/knowledgeAgent/models/index.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/models/mappers.d.ts +0 -43
- package/dist/react-native/generated/knowledgeAgent/models/mappers.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/models/mappers.js +0 -627
- package/dist/react-native/generated/knowledgeAgent/models/mappers.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/models/parameters.d.ts +0 -9
- package/dist/react-native/generated/knowledgeAgent/models/parameters.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/models/parameters.js +0 -75
- package/dist/react-native/generated/knowledgeAgent/models/parameters.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operations/index.d.ts +0 -2
- package/dist/react-native/generated/knowledgeAgent/operations/index.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operations/index.js +0 -9
- package/dist/react-native/generated/knowledgeAgent/operations/index.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts +0 -19
- package/dist/react-native/generated/knowledgeAgent/operations/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operations/knowledgeRetrieval.js +0 -54
- package/dist/react-native/generated/knowledgeAgent/operations/knowledgeRetrieval.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/index.d.ts +0 -2
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/index.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/index.js +0 -9
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/index.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts +0 -11
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js +0 -9
- package/dist/react-native/generated/knowledgeAgent/operationsInterfaces/knowledgeRetrieval.js.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/searchClient.d.ts +0 -21
- package/dist/react-native/generated/knowledgeAgent/searchClient.d.ts.map +0 -1
- package/dist/react-native/generated/knowledgeAgent/searchClient.js +0 -88
- package/dist/react-native/generated/knowledgeAgent/searchClient.js.map +0 -1
- package/dist/react-native/generated/service/operations/knowledgeAgents.d.ts +0 -43
- package/dist/react-native/generated/service/operations/knowledgeAgents.d.ts.map +0 -1
- package/dist/react-native/generated/service/operations/knowledgeAgents.js +0 -160
- package/dist/react-native/generated/service/operations/knowledgeAgents.js.map +0 -1
- package/dist/react-native/generated/service/operations/knowledgeSources.d.ts +0 -43
- package/dist/react-native/generated/service/operations/knowledgeSources.d.ts.map +0 -1
- package/dist/react-native/generated/service/operations/knowledgeSources.js +0 -160
- package/dist/react-native/generated/service/operations/knowledgeSources.js.map +0 -1
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeAgents.d.ts +0 -35
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeAgents.d.ts.map +0 -1
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeAgents.js +0 -9
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeAgents.js.map +0 -1
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.d.ts +0 -35
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +0 -1
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.js +0 -9
- package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.js.map +0 -1
- package/dist/react-native/knowledgeAgentModels.d.ts +0 -43
- package/dist/react-native/knowledgeAgentModels.d.ts.map +0 -1
- package/dist/react-native/knowledgeAgentModels.js +0 -4
- package/dist/react-native/knowledgeAgentModels.js.map +0 -1
- package/dist/react-native/knowledgeRetrievalClient.d.ts +0 -68
- package/dist/react-native/knowledgeRetrievalClient.d.ts.map +0 -1
- package/dist/react-native/knowledgeRetrievalClient.js +0 -108
- package/dist/react-native/knowledgeRetrievalClient.js.map +0 -1
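
Note: the `+0 −…` entries above delete the entire preview knowledge-agent surface (`generated/knowledgeAgent/*`, the `knowledgeAgents`/`knowledgeSources` operations, `knowledgeAgentModels`, and `knowledgeRetrievalClient`) from all four build targets, so code written against 12.2.0-beta.3 loses those imports on upgrade. A minimal compile-time sketch of the impact; the removed import name is inferred from the deleted file names, and the endpoint/key values are placeholders:

```ts
import { SearchIndexClient, AzureKeyCredential } from "@azure/search-documents";

// Removed in 12.2.0 (beta-only, per the deleted knowledgeRetrievalClient.* files above):
// import { KnowledgeRetrievalClient } from "@azure/search-documents";

// The stable clients (SearchClient, SearchIndexClient, SearchIndexerClient,
// SearchIndexingBufferedSender) survive the GA cut unchanged in name.
const indexClient = new SearchIndexClient(
  "https://<service-name>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<admin-api-key>"), // placeholder credential
);
```

Projects that depend on the preview knowledge-agent APIs would typically stay pinned to `12.2.0-beta.3` (or a later beta) until an equivalent surface ships in a stable release.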
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"serviceModels.js","sourceRoot":"","sources":["../../src/serviceModels.ts"],"names":[],"mappings":";AAAA,uCAAuC;AACvC,kCAAkC;;;AA+oClC,wCAEC;AAFD,SAAgB,cAAc,CAAC,KAAkB;IAC/C,OAAO,KAAK,CAAC,IAAI,KAAK,iBAAiB,IAAI,KAAK,CAAC,IAAI,KAAK,6BAA6B,CAAC;AAC1F,CAAC;AAwYD;;;GAGG;AACH,IAAY,mBAiEX;AAjED,WAAY,mBAAmB;IAC7B;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;;OAGG;IACH,6CAAsB,CAAA;IACtB;;;OAGG;IACH,wCAAiB,CAAA;IACjB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;OAEG;IACH,kFAA2D,CAAA;IAC3D;;OAEG;IACH,mGAA4E,CAAA;IAC5E;;;OAGG;IACH,sCAAe,CAAA;IACf;;;OAGG;IACH,0DAAmC,CAAA;IACnC;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;;OAIG;IACH,+CAAwB,CAAA;IACxB;;;OAGG;IACH,oDAA6B,CAAA;IAC7B;;;OAGG;IACH,gDAAyB,CAAA;AAC3B,CAAC,EAjEW,mBAAmB,mCAAnB,mBAAmB,QAiE9B;AAED;;;GAGG;AACH,IAAY,qBAiLX;AAjLD,WAAY,qBAAqB;IAC/B;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,kDAAyB,CAAA;IACzB;;;;;OAKG;IACH,sDAA6B,CAAA;IAC7B;;;OAGG;IACH,iDAAwB,CAAA;IACxB;;;;OAIG;IACH,+CAAsB,CAAA;IACtB;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,oDAA2B,CAAA;IAC3B;;;;OAIG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,yDAAgC,CAAA;IAChC;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,2CAAkB,CAAA;IAClB;;;OAGG;IACH,uEAA8C,CAAA;IAC9C;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,iFAAwD,CAAA;IACxD;;;;OAIG;IACH,kFAAyD,CAAA;IACzD;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,sCAAa,CAAA;IACb;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;OAEG;IACH,yDAAgC,CAAA;AAClC,CAAC,EAjLW,qBAAqB,qCAArB,qBAAqB,QAiLhC;AAED;;;GAGG;AACH,IAAY,oBAMX;AAND,WAAY,oBAAoB;IAC9B;;;OAGG;IACH,gDAAwB,CAAA;AAC1B,CAAC,EANW,oBAAoB,oCAApB,oBAAoB,QAM/B;AAED;;;;GAIG;AACH,IAAY,kBAuXX;AAvXD,WAAY,kBAAkB;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IAC
H,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG,CAAC,kDAA4B,CAAA;IAChC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,mEAA6C,CAAA;IAC7C;;OAEG;IACH,6DAAuC,CAAA;IACvC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;;OAGG;IACH,wDAAkC,CAAA;IAClC;;;OAGG;IACH,gFAA0D,CAAA;IAC1D;;;OAGG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,uCAAiB,CAAA;IACjB;;OAEG;IACH,mCAAa,CAAA;IACb;;OAEG;IACH,+CAAyB,CAAA;AAC3B,CAAC,EAvXW,kBAAkB,kCAAlB,kBAAkB,QAuX7B;AA6uCD,6CAA6C","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport type { OperationOptions } from \"@azure/core-client\";\nimport type { PagedAsyncIterableIterator } from \"@azure/core-paging\";\nimport type {\n AIFoundryModelCatalogName,\n AIServicesAccountKey,\n AsciiFoldingTokenFilter,\n AzureMachineLearningSkill,\n AzureOpenAIModelName,\n AzureOpenAITokenizerParameters,\n BinaryQuantizationCompression,\n BM25Similarity,\n CharFilterName,\n ChatCompletionExtraParametersBehavior,\n ChatCompletionResponseFormat,\n CjkBigramTokenFilter,\n ClassicSimilarity,\n ClassicTokenizer,\n CognitiveServicesAccount as BaseCognitiveServicesAccount,\n CognitiveServicesAccountKey,\n CommonGramTokenFilter,\n CommonModelParameters,\n ConditionalSkill,\n CorsOptions,\n CustomEntity,\n CustomNormalizer,\n DefaultCognitiveServicesAccount,\n DictionaryDecompounderTokenFilter,\n DistanceScoringFunction,\n DocumentExtractionSkill,\n DocumentIntelligenceLayoutSkillChunkingProperties,\n DocumentIntelligenceLayoutSkillExtractionOptions,\n DocumentIntelligenceLayoutSkillMarkdownHeaderDepth,\n DocumentIntelligenceLayoutSkillOutputFormat,\n DocumentIntelligenceLayoutSkillOutputMode,\n EdgeNGramTokenFilterSide,\n EdgeNGramTokenizer,\n ElisionTokenFilter,\n EntityLinkingSkill,\n EntityRecognitionSkillV3,\n FieldMapping,\n FreshnessScoringFunction,\n HighWaterMarkChangeDetectionPolicy,\n IndexerPermissionOption,\n IndexingSchedule,\n IndexProjectionMode,\n IndexStatisticsSummary,\n KeepTokenFilter,\n KeywordMarkerTokenFilter,\n KnowledgeAgentModel as BaseKnowledgeAgentModel,\n KnownBlobIndexerDataToExtract,\n KnownBlobIndexerImageAction,\n KnownBlobIndexerParsingMode,\n KnownBlobIndexerPDFTextRotationAlgorithm,\n KnownCharFilterName,\n KnownCustomEntityLookupSkillLanguage,\n KnownEntityCategory,\n KnownEntityRecognitionSkillLanguage,\n KnownImageAnalysisSkillLanguage,\n KnownImageDetail,\n KnownIndexerExecutionEnvironment,\n KnownKeyPhraseExtractionSkillLanguage,\n KnownLexicalAnalyzerName,\n KnownLexicalTokenizerName,\n KnownOcrSkillLanguage,\n KnownPIIDetectionSkillMaskingMode,\n KnownRegexFlags,\n KnownSearchFieldDataType,\n KnownSearchIndexerDataSourceType,\n KnownSentimentSkillLanguage,\n KnownSplitSkillLanguage,\n KnownTextSplitMode,\n 
KnownTextTranslationSkillLanguage,\n KnownTokenFilterName,\n KnownVectorSearchAlgorithmKind,\n KnownVectorSearchAlgorithmMetric,\n KnownVisualFeature,\n LanguageDetectionSkill,\n LengthTokenFilter,\n LexicalAnalyzerName,\n LexicalNormalizerName,\n LexicalTokenizerName,\n LimitTokenFilter,\n LuceneStandardAnalyzer,\n MagnitudeScoringFunction,\n MappingCharFilter,\n MarkdownHeaderDepth,\n MarkdownParsingSubmode,\n MergeSkill,\n MicrosoftLanguageStemmingTokenizer,\n MicrosoftLanguageTokenizer,\n NativeBlobSoftDeleteDeletionDetectionPolicy,\n NGramTokenizer,\n OcrLineEnding,\n PathHierarchyTokenizerV2 as PathHierarchyTokenizer,\n PatternCaptureTokenFilter,\n PatternReplaceCharFilter,\n PatternReplaceTokenFilter,\n PermissionFilter,\n PhoneticTokenFilter,\n ScalarQuantizationCompression,\n ScoringFunctionAggregation,\n SearchAlias,\n SearchIndexerDataContainer,\n SearchIndexerDataNoneIdentity,\n SearchIndexerDataUserAssignedIdentity,\n SearchIndexerIndexProjectionSelector,\n SearchIndexerKnowledgeStoreProjection,\n SearchIndexerSkill as BaseSearchIndexerSkill,\n SearchIndexKnowledgeSourceParameters,\n SearchIndexPermissionFilterOption,\n SemanticSearch,\n SentimentSkillV3,\n ServiceCounters,\n ServiceLimits,\n ShaperSkill,\n ShingleTokenFilter,\n SnowballTokenFilter,\n SoftDeleteColumnDeletionDetectionPolicy,\n SplitSkillUnit,\n SqlIntegratedChangeTrackingPolicy,\n StemmerOverrideTokenFilter,\n StemmerTokenFilter,\n StopAnalyzer,\n StopwordsTokenFilter,\n Suggester as SearchSuggester,\n SynonymTokenFilter,\n TagScoringFunction,\n TextWeights,\n TokenFilterName,\n TruncateTokenFilter,\n UaxUrlEmailTokenizer,\n UniqueTokenFilter,\n VectorEncodingFormat,\n VectorSearchProfile,\n VectorSearchVectorizerKind,\n WordDelimiterTokenFilter,\n} from \"./generated/service/models/index.js\";\nimport type { KnowledgeAgent } from \"./knowledgeAgentModels.js\";\n\n/**\n * Options for a list skillsets operation.\n */\nexport type ListSkillsetsOptions = OperationOptions;\n\n/**\n * Options for a list synonymMaps operation.\n */\nexport type ListSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for a list indexes operation.\n */\nexport type ListIndexesOptions = OperationOptions;\n\n/**\n * Options for a list indexers operation.\n */\nexport type ListIndexersOptions = OperationOptions;\n\n/**\n * Options for a list data sources operation.\n */\nexport type ListDataSourceConnectionsOptions = OperationOptions;\n\n/**\n * Options for get index operation.\n */\nexport type GetIndexOptions = OperationOptions;\n\n/**\n * Options for get skillset operation.\n */\nexport type GetSkillSetOptions = OperationOptions;\n\n/**\n * Options for get synonymmaps operation.\n */\nexport type GetSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for get indexer operation.\n */\nexport type GetIndexerOptions = OperationOptions;\n\n/**\n * Options for get datasource operation.\n */\nexport type GetDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for get index statistics operation.\n */\nexport type GetIndexStatisticsOptions = OperationOptions;\n\n/**\n * Statistics for a given index. Statistics are collected periodically and are not guaranteed to\n * always be up-to-date.\n */\nexport interface SearchIndexStatistics {\n /**\n * The number of documents in the index.\n * **NOTE: This property will not be serialized. 
It can only be populated by the server.**\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly storageSize: number;\n /**\n * The amount of memory in bytes consumed by vectors in the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly vectorIndexSize: number;\n}\n\n/**\n * Response from a get service statistics request. If successful, it includes service level\n * counters and limits.\n */\nexport interface SearchServiceStatistics {\n /**\n * Service level resource counters.\n */\n counters: ServiceCounters;\n /**\n * Service level general limits.\n */\n limits: ServiceLimits;\n}\n\n/**\n * Options for get service statistics operation.\n */\nexport type GetServiceStatisticsOptions = OperationOptions;\n\n/**\n * Options for get indexer status operation.\n */\nexport type GetIndexerStatusOptions = OperationOptions;\n\n/**\n * Options for reset indexer operation.\n */\nexport type ResetIndexerOptions = OperationOptions;\n\n/**\n * Options for run indexer operation.\n */\nexport type RunIndexerOptions = OperationOptions;\n\n/**\n * Options for create index operation.\n */\nexport type CreateIndexOptions = OperationOptions;\n\n/**\n * Options for create skillset operation.\n */\nexport type CreateSkillsetOptions = OperationOptions;\n\n/**\n * Options for create alias operation.\n */\nexport type CreateAliasOptions = OperationOptions;\n\n/**\n * Options for create or update alias operation.\n */\nexport interface CreateOrUpdateAliasOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete alias operation.\n */\nexport interface DeleteAliasOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for get alias operation.\n */\nexport type GetAliasOptions = OperationOptions;\n\n/**\n * Options for list aliases operation.\n */\nexport type ListAliasesOptions = OperationOptions;\n\n/**\n * Search Alias object.\n */\nexport type SearchIndexAlias = SearchAlias;\n\n/**\n * Options for create synonymmap operation.\n */\nexport type CreateSynonymMapOptions = OperationOptions;\n\n/**\n * Options for create indexer operation.\n */\nexport type CreateIndexerOptions = OperationOptions;\n\n/**\n * Options for create datasource operation.\n */\nexport type CreateDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for create/update index operation.\n */\nexport interface CreateOrUpdateIndexOptions extends OperationOptions {\n /**\n * Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by\n * taking the index offline for at least a few seconds. This temporarily causes indexing and\n * query requests to fail. 
Performance and write availability of the index can be impaired for\n * several minutes after the index is updated, or longer for very large indexes.\n */\n allowIndexDowntime?: boolean;\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for reset docs operation.\n */\nexport interface ResetDocumentsOptions extends OperationOptions {\n /**\n * document keys to be reset\n */\n documentKeys?: string[];\n /**\n * datasource document identifiers to be reset\n */\n datasourceDocumentIds?: string[];\n /**\n * If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this\n * payload will be queued to be re-ingested.\n */\n overwrite?: boolean;\n}\n\n/**\n * Options for reset skills operation.\n */\nexport interface ResetSkillsOptions extends OperationOptions {\n /**\n * the names of skills to be reset.\n */\n skillNames?: string[];\n}\n\n/**\n * Options for create/update skillset operation.\n */\nexport interface CreateOrUpdateSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /**\n * Ignores cache reset requirements.\n */\n skipIndexerResetRequirementForCache?: boolean;\n /**\n * Disables cache reprocessing change detection.\n */\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/**\n * Options for create/update synonymmap operation.\n */\nexport interface CreateOrUpdateSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update indexer operation.\n */\nexport interface CreateorUpdateIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /**\n * Ignores cache reset requirements.\n */\n skipIndexerResetRequirementForCache?: boolean;\n /**\n * Disables cache reprocessing change detection.\n */\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/**\n * Options for create/update datasource operation.\n */\nexport interface CreateorUpdateDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n /**\n * Ignores cache reset requirements.\n */\n skipIndexerResetRequirementForCache?: boolean;\n}\n\n/**\n * Options for delete index operation.\n */\nexport interface DeleteIndexOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete skillset operation.\n */\nexport interface DeleteSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete synonymmap operation.\n */\nexport interface DeleteSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete indexer operation.\n */\nexport interface DeleteIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete datasource operation.\n */\nexport interface DeleteDataSourceConnectionOptions extends 
OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Specifies some text and analysis components used to break that text into tokens.\n */\nexport interface AnalyzeRequest {\n /**\n * The text to break into tokens.\n */\n text: string;\n /**\n * The name of the analyzer to use to break the given text. If this parameter is not specified,\n * you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. {@link KnownAnalyzerNames} is an enum containing built-in analyzer names.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n analyzerName?: LexicalAnalyzerName;\n /**\n * The name of the tokenizer to use to break the given text. If this parameter is not specified,\n * you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. {@link KnownTokenizerNames} is an enum containing built-in tokenizer names.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n tokenizerName?: LexicalTokenizerName;\n /**\n * The name of the normalizer to use to normalize the given text. {@link KnownNormalizerNames} is\n * an enum containing built-in normalizer names.\n */\n normalizerName?: LexicalNormalizerName;\n /**\n * An optional list of token filters to use when breaking the given text. This parameter can only\n * be set when using the tokenizer parameter.\n */\n tokenFilters?: TokenFilterName[];\n /**\n * An optional list of character filters to use when breaking the given text. This parameter can\n * only be set when using the tokenizer parameter.\n */\n charFilters?: CharFilterName[];\n}\n\n/**\n * Options for analyze text operation.\n */\nexport type AnalyzeTextOptions = OperationOptions & AnalyzeRequest;\n\n// BEGIN manually modified generated interfaces\n//\n// This section is for places where we have to manually fix issues\n// with interfaces from the generated code.\n// One issue is that unions of discriminated types are generated with\n// their abstract base class as a member.\n\n/**\n * Flexibly separates text into terms via a regular expression pattern. This analyzer is\n * implemented using Apache Lucene.\n */\nexport interface PatternAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * A value indicating whether terms should be lower-cased. Default is true. Default value: true.\n */\n lowerCaseTerms?: boolean;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * A list of stopwords.\n */\n stopwords?: string[];\n}\n\n/**\n * Allows you to take control over the process of converting text into indexable/searchable tokens.\n * It's a user-defined configuration consisting of a single predefined tokenizer and one or more\n * filters. 
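The AnalyzeRequest shape above requires `text` plus exactly one of `analyzerName` or `tokenizerName`. A minimal sketch of exercising it through SearchIndexClient.analyzeText; the endpoint, key, and index name are placeholders, not values from this package:

import { AzureKeyCredential, SearchIndexClient } from "@azure/search-documents";

async function analyzeSample(): Promise<void> {
  const client = new SearchIndexClient(
    "https://<service-name>.search.windows.net", // placeholder endpoint
    new AzureKeyCredential("<api-key>"), // placeholder credential
  );
  // AnalyzeTextOptions = OperationOptions & AnalyzeRequest, so `text` plus
  // either `analyzerName` or `tokenizerName` (never both) goes here.
  const result = await client.analyzeText("my-index", {
    text: "sunny day",
    analyzerName: "standard.lucene",
  });
  for (const info of result.tokens) {
    console.log(info.token, info.startOffset, info.endOffset);
  }
}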
The tokenizer is responsible for breaking text into tokens, and the filters for\n * modifying tokens emitted by the tokenizer.\n */\nexport interface CustomAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.CustomAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as\n * breaking a sentence into words. {@link KnownTokenizerNames} is an enum containing built-in\n * tokenizer names.\n */\n tokenizerName: LexicalTokenizerName;\n /**\n * A list of token filters used to filter out or modify the tokens generated by a tokenizer. For\n * example, you can specify a lowercase filter that converts all characters to lowercase. The\n * filters are run in the order in which they are listed.\n */\n tokenFilters?: TokenFilterName[];\n /**\n * A list of character filters used to prepare input text before it is processed by the\n * tokenizer. For instance, they can replace certain characters or symbols. The filters are run\n * in the order in which they are listed.\n */\n charFilters?: CharFilterName[];\n}\n\n/**\n * Contains the possible cases for Analyzer.\n */\nexport type LexicalAnalyzer =\n | CustomAnalyzer\n | PatternAnalyzer\n | LuceneStandardAnalyzer\n | StopAnalyzer;\n\n/**\n * A skill that calls a language model via Azure AI Foundry's Chat Completions endpoint.\n */\nexport interface ChatCompletionSkill extends WebApiSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Custom.ChatCompletionSkill\";\n /**\n * API key for authenticating to the model. Both apiKey and authIdentity cannot be specified at\n * the same time.\n */\n apiKey?: string;\n /**\n * Common language model parameters that customers can tweak. If omitted, reasonable defaults will\n * be applied.\n */\n commonModelParameters?: CommonModelParameters;\n /**\n * Open-type dictionary for model-specific parameters that should be appended to the chat\n * completions call. Follows Azure AI Foundry’s extensibility pattern.\n */\n extraParameters?: { [propertyName: string]: unknown };\n /**\n * How extra parameters are handled by Azure AI Foundry. Default is 'error'.\n */\n extraParametersBehavior?: ChatCompletionExtraParametersBehavior;\n /**\n * Determines how the LLM should format its response. Defaults to 'text' response type.\n */\n responseFormat?: ChatCompletionResponseFormat;\n}\n\n/**\n * A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call\n * your custom code.\n */\nexport interface WebApiSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype:\n | \"#Microsoft.Skills.Custom.WebApiSkill\"\n | \"#Microsoft.Skills.Custom.ChatCompletionSkill\";\n /**\n * The url for the Web API.\n */\n uri: string;\n /**\n * The headers required to make the http request.\n */\n httpHeaders?: { [propertyName: string]: string };\n /**\n * The method for the http request.\n */\n httpMethod?: string;\n /**\n * The desired timeout for the request. 
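To make the CustomAnalyzer contract above concrete, a small illustrative literal; the analyzer name and filter selection are invented for the example:

import type { CustomAnalyzer } from "@azure/search-documents";

// One predefined tokenizer plus token filters, which run in the listed order.
const foldingAnalyzer: CustomAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
  name: "keyword_folding", // illustrative name
  tokenizerName: "keyword_v2", // emits the entire input as a single token
  tokenFilters: ["lowercase", "asciifolding"],
};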
Default is 30 seconds.\n */\n timeout?: string;\n /**\n * The desired batch size which indicates number of documents.\n */\n batchSize?: number;\n /**\n * If set, the number of parallel calls that can be made to the Web API.\n */\n degreeOfParallelism?: number;\n /**\n * Applies to custom skills that connect to external code in an Azure function or some other\n * application that provides the transformations. This value should be the application ID created\n * for the function or app when it was registered with Azure Active Directory. When specified, the\n * custom skill connects to the function or app using a managed ID (either system or\n * user-assigned) of the search service and the access token of the function or app, using this\n * value as the resource id for creating the scope of the access token.\n */\n authResourceId?: string;\n /**\n * The user-assigned managed identity used for outbound connections. If an authResourceId is\n * provided and it's not specified, the system-assigned managed identity is used. On updates to\n * the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the\n * value of this property is cleared.\n */\n authIdentity?: SearchIndexerDataIdentity;\n}\n\nexport type WebApiSkills = WebApiSkill | ChatCompletionSkill;\n\n/**\n * Allows you to generate a vector embedding for a given image or text input using the Azure AI\n * Services Vision Vectorize API.\n */\nexport interface VisionVectorizeSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Vision.VectorizeSkill\";\n /**\n * The version of the model to use when calling the AI Services Vision service. It will default to\n * the latest available when not specified.\n */\n modelVersion?: string;\n}\n\n/**\n * Contains the possible cases for Skill.\n */\nexport type SearchIndexerSkill =\n | AzureMachineLearningSkill\n | AzureOpenAIEmbeddingSkill\n | ConditionalSkill\n | CustomEntityLookupSkill\n | DocumentExtractionSkill\n | DocumentIntelligenceLayoutSkill\n | EntityLinkingSkill\n | EntityRecognitionSkill\n | EntityRecognitionSkillV3\n | ImageAnalysisSkill\n | KeyPhraseExtractionSkill\n | LanguageDetectionSkill\n | MergeSkill\n | OcrSkill\n | PIIDetectionSkill\n | SentimentSkill\n | SentimentSkillV3\n | ShaperSkill\n | SplitSkill\n | TextTranslationSkill\n | VisionVectorizeSkill\n | WebApiSkills;\n\n/**\n * A skill that extracts content and layout information (as markdown), via Azure AI Services, from\n * files within the enrichment pipeline.\n */\nexport interface DocumentIntelligenceLayoutSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill\";\n /**\n * Controls the cardinality of the output format. Default is 'markdown'.\n */\n outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat;\n /**\n * Controls the cardinality of the output produced by the skill. Default is 'oneToMany'.\n */\n outputMode?: DocumentIntelligenceLayoutSkillOutputMode;\n /**\n * The depth of headers in the markdown output. 
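A hedged sketch of a WebApiSkill instance matching the interface above; the endpoint, context, and input/output names are assumptions, not part of this package:

import type { WebApiSkill } from "@azure/search-documents";

const customSkill: WebApiSkill = {
  odatatype: "#Microsoft.Skills.Custom.WebApiSkill",
  name: "summarizer", // hypothetical skill name
  context: "/document",
  uri: "https://example.azurewebsites.net/api/summarize", // placeholder endpoint
  httpMethod: "POST",
  timeout: "PT30S", // ISO 8601 duration; 30 seconds is the documented default
  batchSize: 4,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "summary", targetName: "summary" }],
};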
Default is h6.\n */\n markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;\n /**\n * Controls the cardinality of the content extracted from the document by the skill\n */\n extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[];\n /**\n * Controls the cardinality for chunking the content.\n */\n chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties;\n}\n\n/**\n * Contains the possible cases for CognitiveServicesAccount.\n */\nexport type CognitiveServicesAccount =\n | DefaultCognitiveServicesAccount\n | CognitiveServicesAccountKey\n | AIServicesAccountKey\n | AIServicesAccountIdentity;\n\n/**\n * The multi-region account of an Azure AI service resource that's attached to a skillset.\n */\nexport interface AIServicesAccountIdentity extends BaseCognitiveServicesAccount {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Azure.Search.AIServicesByIdentity\";\n /**\n * The user-assigned managed identity used for connections to AI Service. If not specified, the\n * system-assigned managed identity is used. On updates to the skillset, if the identity is\n * unspecified, the value remains unchanged. If set to \"none\", the value of this property is\n * cleared.\n */\n identity?: SearchIndexerDataIdentity;\n /**\n * The subdomain url for the corresponding AI Service.\n */\n subdomainUrl: string;\n}\n/**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is\n * implemented using Apache Lucene.\n */\nexport interface PatternTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * The zero-based ordinal of the matching group in the regular expression pattern to extract into\n * tokens. Use -1 if you want to use the entire pattern to split the input into tokens,\n * irrespective of matching groups. Default is -1. Default value: -1.\n */\n group?: number;\n}\n/**\n * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using\n * Apache Lucene.\n */\nexport interface LuceneStandardTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.StandardTokenizerV2\"\n | \"#Microsoft.Azure.Search.StandardTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. 
Default value: 255.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * This token filter is implemented using Apache Lucene.\n */\nexport interface EdgeNGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n /**\n * Specifies which side of the input the n-gram should be generated from. Default is \"front\".\n * Possible values include: 'Front', 'Back'\n */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/**\n * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.\n */\nexport interface KeywordTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.KeywordTokenizerV2\"\n | \"#Microsoft.Azure.Search.KeywordTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 256. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. Default value: 256.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Contains the possible cases for Tokenizer.\n */\nexport type LexicalTokenizer =\n | ClassicTokenizer\n | EdgeNGramTokenizer\n | KeywordTokenizer\n | MicrosoftLanguageTokenizer\n | MicrosoftLanguageStemmingTokenizer\n | NGramTokenizer\n | PathHierarchyTokenizer\n | PatternTokenizer\n | LuceneStandardTokenizer\n | UaxUrlEmailTokenizer;\n\n/**\n * Definition of additional projections to azure blob, table, or files, of enriched data.\n */\nexport interface SearchIndexerKnowledgeStore {\n /**\n * The connection string to the storage account projections will be stored in.\n */\n storageConnectionString: string;\n /**\n * A list of additional projections to perform during indexing.\n */\n projections: SearchIndexerKnowledgeStoreProjection[];\n /**\n * The user-assigned managed identity used for connections to Azure Storage when writing\n * knowledge store projections. If the connection string indicates an identity (ResourceId) and\n * it's not specified, the system-assigned managed identity is used. On updates to the indexer,\n * if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of\n * this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * Contains the possible cases for Similarity.\n */\nexport type SimilarityAlgorithm = ClassicSimilarity | BM25Similarity;\n\n/**\n * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.\n */\nexport interface NGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.NGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.NGramTokenFilter\";\n /**\n * The name of the token filter. 
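For instance, a front-edge n-gram filter conforming to the EdgeNGramTokenFilter interface above; the filter name and gram lengths are illustrative:

import type { EdgeNGramTokenFilter } from "@azure/search-documents";

// Generates 2- to 20-character prefixes of each token, a common building
// block for search-as-you-type experiences.
const prefixFilter: EdgeNGramTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2",
  name: "edge_ngrams_2_20",
  minGram: 2,
  maxGram: 20,
  side: "front",
};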
It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n}\n\n/**\n * Contains the possible cases for TokenFilter.\n */\nexport type TokenFilter =\n | AsciiFoldingTokenFilter\n | CjkBigramTokenFilter\n | CommonGramTokenFilter\n | DictionaryDecompounderTokenFilter\n | EdgeNGramTokenFilter\n | ElisionTokenFilter\n | KeepTokenFilter\n | KeywordMarkerTokenFilter\n | LengthTokenFilter\n | LimitTokenFilter\n | NGramTokenFilter\n | PatternCaptureTokenFilter\n | PatternReplaceTokenFilter\n | PhoneticTokenFilter\n | ShingleTokenFilter\n | SnowballTokenFilter\n | StemmerTokenFilter\n | StemmerOverrideTokenFilter\n | StopwordsTokenFilter\n | SynonymTokenFilter\n | TruncateTokenFilter\n | UniqueTokenFilter\n | WordDelimiterTokenFilter;\n\n/**\n * Contains the possible cases for CharFilter.\n */\nexport type CharFilter = MappingCharFilter | PatternReplaceCharFilter;\n\n/**\n * Contains the possible cases for LexicalNormalizer.\n */\nexport type LexicalNormalizer = CustomNormalizer;\n\n/**\n * Contains the possible cases for ScoringFunction.\n */\nexport type ScoringFunction =\n | DistanceScoringFunction\n | FreshnessScoringFunction\n | MagnitudeScoringFunction\n | TagScoringFunction;\n\n/**\n * Defines values for ComplexDataType.\n * Possible values include: 'Edm.ComplexType', 'Collection(Edm.ComplexType)'\n * @readonly\n */\nexport type ComplexDataType = \"Edm.ComplexType\" | \"Collection(Edm.ComplexType)\";\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport type SearchField = SimpleField | ComplexField;\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface SimpleField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field.\n */\n type: SearchFieldDataType;\n /**\n * A value indicating whether the field uniquely identifies documents in the index. Exactly one\n * top-level field in each index must be chosen as the key field and it must be of type\n * Edm.String. Key fields can be used to look up documents directly and update or delete specific\n * documents. Default is false.\n */\n key?: boolean;\n /**\n * A value indicating whether the field can be returned in a search result. You can disable this\n * option if you want to use a field (for example, margin) as a filter, sorting, or scoring\n * mechanism but do not want the field to be visible to the end user. This property must be false\n * for key fields. This property can be changed on existing fields. Enabling this property does\n * not cause any increase in index storage requirements. Default is true for vector fields, false\n * otherwise.\n */\n hidden?: boolean;\n /**\n * An immutable value indicating whether the field will be persisted separately on disk to be\n * returned in a search result. You can disable this option if you don't plan to return the field\n * contents in a search response to save on storage overhead. 
This can only be set during index\n * creation and only for vector fields. This property cannot be changed for existing fields or set\n * as false for new fields. If this property is set to `false`, the property `hidden` must be set\n * to `true`. This property must be true or unset for key fields, for new fields, and for\n * non-vector fields, and it must be null for complex fields. Disabling this property will reduce\n * index storage requirements. The default is true for vector fields.\n */\n stored?: boolean;\n /**\n * A value indicating whether the field is full-text searchable. This means it will undergo\n * analysis such as word-breaking during indexing. If you set a searchable field to a value like\n * \"sunny day\", internally it will be split into the individual tokens \"sunny\" and \"day\". This\n * enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String)\n * are searchable by default. This property must be false for simple\n * fields of other non-string data types.\n * Note: searchable fields consume extra space in your index to accommodate additional tokenized\n * versions of the field value for full-text searches. If you want to save space in your index and\n * you don't need a field to be included in searches, set searchable to false. Default is false.\n */\n searchable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $filter queries.\n * `filterable` differs from `searchable` in how strings are handled. Fields of type Edm.String or\n * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for\n * exact matches only. For example, if you set such a field f to \"sunny day\", $filter=f eq 'sunny'\n * will find no matches, but $filter=f eq 'sunny day' will. Default is false.\n */\n filterable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $orderby expressions. By\n * default, the service sorts results by score, but in many experiences users will want\n * to sort by fields in the documents. A simple field can be sortable only if it is single-valued\n * (it has a single value in the scope of the parent document). Simple collection fields cannot\n * be sortable, since they are multi-valued. Simple sub-fields of complex collections are also\n * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent\n * field, or an ancestor field, that's the complex collection. The default is false.\n */\n sortable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in facet queries. Typically\n * used in a presentation of search results that includes hit count by category (for example,\n * search for digital cameras and see hits by brand, by megapixels, by price, and so on).\n * Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable.\n * Default is false for all other simple fields.\n */\n facetable?: boolean;\n /**\n * A value indicating whether the field should be used as a permission filter.\n */\n permissionFilter?: PermissionFilter;\n /**\n * The name of the analyzer to use for the field. This option can be used only with\n * searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer.\n * Once the analyzer is chosen, it cannot be changed for the field.\n */\n analyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at search time for the field. 
This option can be used only with\n * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set\n * together with the `analyzerName` option. This property cannot be set to the name of a language\n * analyzer; use the `analyzerName` property instead if you need a language analyzer. This\n * analyzer can be updated on an existing field.\n */\n searchAnalyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at indexing time for the field. This option can be used only with\n * searchable fields. It must be set together with searchAnalyzer and it cannot be set together\n * with the `analyzerName` option. Once the analyzer is chosen, it cannot be changed for the\n * field. KnownAnalyzerNames is an enum containing known values.\n */\n indexAnalyzerName?: LexicalAnalyzerName;\n /**\n * A list of the names of synonym maps to associate with this field. This option can be used only\n * with searchable fields. Currently only one synonym map per field is supported. Assigning a\n * synonym map to a field ensures that query terms targeting that field are expanded at\n * query-time using the rules in the synonym map. This attribute can be changed on existing\n * fields.\n */\n synonymMapNames?: string[];\n /**\n * The name of the normalizer used at indexing time for the field.\n */\n normalizerName?: LexicalNormalizerName;\n /**\n * The dimensionality of the vector field.\n */\n vectorSearchDimensions?: number;\n /**\n * The name of the vector search profile that specifies the algorithm and vectorizer to use when\n * searching the vector field.\n */\n vectorSearchProfileName?: string;\n /**\n * The encoding format to interpret the field contents.\n */\n vectorEncodingFormat?: VectorEncodingFormat;\n}\n\nexport function isComplexField(field: SearchField): field is ComplexField {\n return field.type === \"Edm.ComplexType\" || field.type === \"Collection(Edm.ComplexType)\";\n}\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface ComplexField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field.\n * Possible values include: 'Edm.ComplexType','Collection(Edm.ComplexType)'\n */\n type: ComplexDataType;\n /**\n * A list of sub-fields.\n */\n fields?: SearchField[];\n}\n\n/**\n * Represents a synonym map definition.\n */\nexport interface SynonymMap {\n /**\n * The name of the synonym map.\n */\n name: string;\n /**\n * An array of synonym rules in the specified synonym map format.\n */\n synonyms: string[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure AI Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure AI Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. 
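The isComplexField type guard above narrows SearchField to ComplexField. A short sketch using it (the helper name is invented, and it assumes the guard is re-exported from the package root as the surrounding models are):

import { isComplexField, type SearchField } from "@azure/search-documents";

// Recursively collect "parent/child" paths of all leaf (simple) fields.
function leafFieldPaths(fields: SearchField[], prefix = ""): string[] {
  return fields.flatMap((field) =>
    isComplexField(field)
      ? leafFieldPaths(field.fields ?? [], `${prefix}${field.name}/`)
      : [`${prefix}${field.name}`],
  );
}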
Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The ETag of the synonym map.\n */\n etag?: string;\n}\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\nexport type IndexIterator = PagedAsyncIterableIterator<SearchIndex, SearchIndex[], {}>;\n\n/**\n * An iterator for statistics summaries for each index in the Search service. Will make requests as\n * needed during iteration. Use .byPage() to make one request to the server per iteration.\n */\nexport type IndexStatisticsSummaryIterator = PagedAsyncIterableIterator<\n IndexStatisticsSummary,\n IndexStatisticsSummary[],\n {}\n>;\n\n/**\n * An iterator for listing the knowledge agents that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server per iteration.\n */\nexport type KnowledgeAgentIterator = PagedAsyncIterableIterator<\n KnowledgeAgent,\n KnowledgeAgent[],\n {}\n>;\n\n/**\n * An iterator for listing the knowledge sources that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server per iteration.\n */\nexport type KnowledgeSourceIterator = PagedAsyncIterableIterator<\n KnowledgeSource,\n KnowledgeSource[],\n {}\n>;\n\n/**\n * An iterator for listing the aliases that exist in the Search service. This will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\nexport type AliasIterator = PagedAsyncIterableIterator<SearchIndexAlias, SearchIndexAlias[], {}>;\n\n/**\n * An iterator for listing the names of the indexes that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\nexport type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;\n\n/**\n * Represents a search index definition, which describes the fields and search behavior of an\n * index.\n */\nexport interface SearchIndex {\n /**\n * The name of the index.\n */\n name: string;\n /**\n * The description of the index.\n */\n description?: string;\n /**\n * The fields of the index.\n */\n fields: SearchField[];\n /**\n * The scoring profiles for the index.\n */\n scoringProfiles?: ScoringProfile[];\n /**\n * The name of the scoring profile to use if none is specified in the query. If this property is\n * not set and no scoring profile is specified in the query, then default scoring (tf-idf) will\n * be used.\n */\n defaultScoringProfile?: string;\n /**\n * Options to control Cross-Origin Resource Sharing (CORS) for the index.\n */\n corsOptions?: CorsOptions;\n /**\n * The suggesters for the index.\n */\n suggesters?: SearchSuggester[];\n /**\n * The analyzers for the index.\n */\n analyzers?: LexicalAnalyzer[];\n /**\n * The tokenizers for the index.\n */\n tokenizers?: LexicalTokenizer[];\n /**\n * The token filters for the index.\n */\n tokenFilters?: TokenFilter[];\n /**\n * The character filters for the index.\n */\n charFilters?: CharFilter[];\n /**\n * The normalizers for the index.\n */\n normalizers?: LexicalNormalizer[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. 
This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure AI Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure AI Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The type of similarity algorithm to be used when scoring and ranking the documents matching a\n * search query. The similarity algorithm can only be defined at index creation time and cannot\n * be modified on existing indexes. If null, the ClassicSimilarity algorithm is used.\n */\n similarity?: SimilarityAlgorithm;\n /**\n * Defines parameters for a search index that influence semantic capabilities.\n */\n semanticSearch?: SemanticSearch;\n /**\n * Contains configuration options related to vector search.\n */\n vectorSearch?: VectorSearch;\n /**\n * A value indicating whether permission filtering is enabled for the index.\n */\n permissionFilterOption?: SearchIndexPermissionFilterOption;\n /**\n * The ETag of the index.\n */\n etag?: string;\n}\n\nexport interface SearchIndexerCache {\n /**\n * A guid for the SearchIndexerCache.\n */\n id?: string;\n /**\n * The connection string to the storage account where the cache data will be persisted.\n */\n storageConnectionString?: string;\n /**\n * Specifies whether incremental reprocessing is enabled.\n */\n enableReprocessing?: boolean;\n /**\n * The user-assigned managed identity used for connections to the enrichment cache. If the\n * connection string indicates an identity (ResourceId) and it's not specified, the\n * system-assigned managed identity is used. On updates to the indexer, if the identity is\n * unspecified, the value remains unchanged. If set to \"none\", the value of this property is\n * cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * Represents an indexer.\n */\nexport interface SearchIndexer {\n /**\n * The name of the indexer.\n */\n name: string;\n /**\n * The description of the indexer.\n */\n description?: string;\n /**\n * The name of the datasource from which this indexer reads data.\n */\n dataSourceName: string;\n /**\n * The name of the skillset executing with this indexer.\n */\n skillsetName?: string;\n /**\n * The name of the index to which this indexer writes data.\n */\n targetIndexName: string;\n /**\n * The schedule for this indexer.\n */\n schedule?: IndexingSchedule;\n /**\n * Parameters for indexer execution.\n */\n parameters?: IndexingParameters;\n /**\n * Defines mappings between fields in the data source and corresponding target fields in the\n * index.\n */\n fieldMappings?: FieldMapping[];\n /**\n * Output field mappings are applied after enrichment and immediately before indexing.\n */\n outputFieldMappings?: FieldMapping[];\n /**\n * A value indicating whether the indexer is disabled. Default is false. Default value: false.\n */\n isDisabled?: boolean;\n /**\n * The ETag of the indexer.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. 
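A minimal SearchIndex literal consistent with the interface above; the index and field names are invented, and only `name`, `fields`, and a single key field are strictly required:

import type { SearchIndex } from "@azure/search-documents";

const index: SearchIndex = {
  name: "hotels-sample", // hypothetical index name
  fields: [
    // Exactly one top-level Edm.String field must be the key.
    { name: "hotelId", type: "Edm.String", key: true },
    { name: "description", type: "Edm.String", searchable: true, analyzerName: "en.lucene" },
    {
      name: "rooms",
      type: "Collection(Edm.ComplexType)",
      // Sub-fields of complex collections are multi-valued, so no `sortable` here.
      fields: [{ name: "baseRate", type: "Edm.Double", filterable: true }],
    },
  ],
};
// e.g. await new SearchIndexClient(endpoint, credential).createIndex(index);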
This key is used to\n * provide an additional level of encryption-at-rest for your indexer definition (as well as\n * indexer execution status) when you want full assurance that no one, not even Microsoft, can\n * decrypt them in Azure AI Search. Once you have encrypted your indexer definition, it\n * will always remain encrypted. Azure AI Search will ignore attempts to set this property\n * to null. You can change this property as needed if you want to rotate your encryption key;\n * Your indexer definition (and indexer execution status) will be unaffected. Encryption with\n * customer-managed keys is not available for free search services, and is only available for\n * paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * Adds caching to an enrichment pipeline to allow for incremental modification steps without\n * having to rebuild the index every time.\n */\n cache?: SearchIndexerCache;\n}\n\n/**\n * A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be\n * used to encrypt or decrypt data-at-rest in Azure AI Search, such as indexes and synonym\n * maps.\n */\nexport interface SearchResourceEncryptionKey {\n /**\n * The name of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyName: string;\n /**\n * The version of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyVersion?: string;\n /**\n * The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be\n * used to encrypt your data at rest. An example URI might be\n * https://my-keyvault-name.vault.azure.net.\n */\n vaultUrl: string;\n /**\n * An AAD Application ID that was granted the required access permissions to the Azure Key Vault\n * that is to be used when encrypting your data at rest. The Application ID should not be\n * confused with the Object ID for your AAD Application.\n */\n applicationId?: string;\n /**\n * The authentication key of the specified AAD application.\n */\n applicationSecret?: string;\n /**\n * An explicit managed identity to use for this encryption key. If not specified and the access\n * credentials property is null, the system-assigned managed identity is used. On update to the\n * resource, if the explicit identity is unspecified, it remains unchanged. If \"none\" is specified,\n * the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * A list of skills.\n */\nexport interface SearchIndexerSkillset {\n /**\n * The name of the skillset.\n */\n name: string;\n /**\n * The description of the skillset.\n */\n description?: string;\n /**\n * A list of skills in the skillset.\n */\n skills: SearchIndexerSkill[];\n /**\n * Details about cognitive services to be used when running skills.\n */\n cognitiveServicesAccount?: CognitiveServicesAccount;\n /**\n * Definition of additional projections to azure blob, table, or files, of enriched data.\n */\n knowledgeStore?: SearchIndexerKnowledgeStore;\n /**\n * Definition of additional projections to secondary search index(es).\n */\n indexProjection?: SearchIndexerIndexProjection;\n /**\n * The ETag of the skillset.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your skillset definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure\n * AI Search. 
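For reference, a SearchResourceEncryptionKey literal matching the interface above, reusing the vault URI example from its own documentation; the key name and version are placeholders:

import type { SearchResourceEncryptionKey } from "@azure/search-documents";

const encryptionKey: SearchResourceEncryptionKey = {
  keyName: "my-search-cmk", // placeholder key name
  keyVersion: "<key-version>", // placeholder; this property is optional
  vaultUrl: "https://my-keyvault-name.vault.azure.net",
};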
Once you have encrypted your skillset definition, it will always remain\n * encrypted. Azure AI Search will ignore attempts to set this property to null. You can\n * change this property as needed if you want to rotate your encryption key; Your skillset\n * definition will be unaffected. Encryption with customer-managed keys is not available for free\n * search services, and is only available for paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/**\n * Defines parameters for a search index that influence scoring in search queries.\n */\nexport interface ScoringProfile {\n /**\n * The name of the scoring profile.\n */\n name: string;\n /**\n * Parameters that boost scoring based on text matches in certain index fields.\n */\n textWeights?: TextWeights;\n /**\n * The collection of functions that influence the scoring of documents.\n */\n functions?: ScoringFunction[];\n /**\n * A value indicating how the results of individual scoring functions should be combined.\n * Defaults to \"Sum\". Ignored if there are no scoring functions. Possible values include: 'sum',\n * 'average', 'minimum', 'maximum', 'firstMatching'\n */\n functionAggregation?: ScoringFunctionAggregation;\n}\n\n/**\n * Defines values for TokenizerName.\n * @readonly\n */\nexport enum KnownTokenizerNames {\n /**\n * Grammar-based tokenizer that is suitable for processing most European-language documents. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html\n */\n Classic = \"classic\",\n /**\n * Tokenizes the input from an edge into n-grams of the given size(s). See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html\n */\n EdgeNGram = \"edgeNGram\",\n /**\n * Emits the entire input as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html\n */\n Keyword = \"keyword_v2\",\n /**\n * Divides text at non-letters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html\n */\n Letter = \"letter\",\n /**\n * Divides text at non-letters and converts them to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html\n */\n Lowercase = \"lowercase\",\n /**\n * Divides text using language-specific rules.\n */\n MicrosoftLanguageTokenizer = \"microsoft_language_tokenizer\",\n /**\n * Divides text using language-specific rules and reduces words to their base forms.\n */\n MicrosoftLanguageStemmingTokenizer = \"microsoft_language_stemming_tokenizer\",\n /**\n * Tokenizes the input into n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html\n */\n NGram = \"nGram\",\n /**\n * Tokenizer for path-like hierarchies. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html\n */\n PathHierarchy = \"path_hierarchy_v2\",\n /**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html\n */\n Pattern = \"pattern\",\n /**\n * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop\n * filter. 
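To ground the ScoringProfile shape above, a sketch that boosts fresher documents; the profile name, field, boost, and duration are assumptions:

import type { ScoringProfile } from "@azure/search-documents";

const boostRecent: ScoringProfile = {
  name: "boost-recent",
  functions: [
    {
      type: "freshness", // discriminator for FreshnessScoringFunction
      fieldName: "lastRenovationDate",
      boost: 2,
      interpolation: "quadratic",
      parameters: { boostingDuration: "P365D" }, // ISO 8601 duration
    },
  ],
  functionAggregation: "sum", // how individual function scores combine
};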
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html\n */\n Standard = \"standard_v2\",\n /**\n * Tokenizes urls and emails as one token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html\n */\n UaxUrlEmail = \"uax_url_email\",\n /**\n * Divides text at whitespace. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html\n */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Defines values for TokenFilterName.\n * @readonly\n */\nexport enum KnownTokenFilterNames {\n /**\n * A token filter that applies the Arabic normalizer to normalize the orthography. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html\n */\n ArabicNormalization = \"arabic_normalization\",\n /**\n * Strips all characters after an apostrophe (including the apostrophe itself). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html\n */\n Apostrophe = \"apostrophe\",\n /**\n * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127\n * ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such\n * equivalents exist. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n AsciiFolding = \"asciifolding\",\n /**\n * Forms bigrams of CJK terms that are generated from StandardTokenizer. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html\n */\n CjkBigram = \"cjk_bigram\",\n /**\n * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic\n * Latin, and half-width Katakana variants into the equivalent Kana. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html\n */\n CjkWidth = \"cjk_width\",\n /**\n * Removes English possessives, and dots from acronyms. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html\n */\n Classic = \"classic\",\n /**\n * Construct bigrams for frequently occurring terms while indexing. Single terms are still\n * indexed too, with bigrams overlaid. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html\n */\n CommonGram = \"common_grams\",\n /**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html\n */\n EdgeNGram = \"edgeNGram_v2\",\n /**\n * Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html\n */\n Elision = \"elision\",\n /**\n * Normalizes German characters according to the heuristics of the German2 snowball algorithm.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html\n */\n GermanNormalization = \"german_normalization\",\n /**\n * Normalizes text in Hindi to remove some differences in spelling variations. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html\n */\n HindiNormalization = \"hindi_normalization\",\n /**\n * Normalizes the Unicode representation of text in Indian languages. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html\n */\n IndicNormalization = \"indic_normalization\",\n /**\n * Emits each incoming token twice, once as keyword and once as non-keyword. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html\n */\n KeywordRepeat = \"keyword_repeat\",\n /**\n * A high-performance kstem filter for English. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html\n */\n KStem = \"kstem\",\n /**\n * Removes words that are too long or too short. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html\n */\n Length = \"length\",\n /**\n * Limits the number of tokens while indexing. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html\n */\n Limit = \"limit\",\n /**\n * Normalizes token text to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm\n */\n Lowercase = \"lowercase\",\n /**\n * Generates n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html\n */\n NGram = \"nGram_v2\",\n /**\n * Applies normalization for Persian. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html\n */\n PersianNormalization = \"persian_normalization\",\n /**\n * Create tokens for phonetic matches. See\n * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html\n */\n Phonetic = \"phonetic\",\n /**\n * Uses the Porter stemming algorithm to transform the token stream. See\n * http://tartarus.org/~martin/PorterStemmer\n */\n PorterStem = \"porter_stem\",\n /**\n * Reverses the token string. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html\n */\n Reverse = \"reverse\",\n /**\n * Normalizes use of the interchangeable Scandinavian characters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html\n */\n ScandinavianNormalization = \"scandinavian_normalization\",\n /**\n * Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use\n * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html\n */\n ScandinavianFoldingNormalization = \"scandinavian_folding\",\n /**\n * Creates combinations of tokens as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html\n */\n Shingle = \"shingle\",\n /**\n * A filter that stems words using a Snowball-generated stemmer. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html\n */\n Snowball = \"snowball\",\n /**\n * Normalizes the Unicode representation of Sorani text. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html\n */\n SoraniNormalization = \"sorani_normalization\",\n /**\n * Language specific stemming filter. See\n * https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters\n */\n Stemmer = \"stemmer\",\n /**\n * Removes stop words from a token stream. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html\n */\n Stopwords = \"stopwords\",\n /**\n * Trims leading and trailing whitespace from tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html\n */\n Trim = \"trim\",\n /**\n * Truncates the terms to a specific length. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html\n */\n Truncate = \"truncate\",\n /**\n * Filters out tokens with same text as the previous token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html\n */\n Unique = \"unique\",\n /**\n * Normalizes token text to upper case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html\n */\n Uppercase = \"uppercase\",\n /**\n * Splits words into subwords and performs optional transformations on subword groups.\n */\n WordDelimiter = \"word_delimiter\",\n}\n\n/**\n * Defines values for CharFilterName.\n * @readonly\n */\nexport enum KnownCharFilterNames {\n /**\n * A character filter that attempts to strip out HTML constructs. 
See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html\n */\n HtmlStrip = \"html_strip\",\n}\n\n/**\n * Defines values for AnalyzerName.\n * See https://learn.microsoft.com/rest/api/searchservice/Language-support\n * @readonly\n */\nexport enum KnownAnalyzerNames {\n /**\n * Arabic\n */\n ArMicrosoft = \"ar.microsoft\",\n /**\n * Arabic\n */\n ArLucene = \"ar.lucene\",\n /**\n * Armenian\n */\n HyLucene = \"hy.lucene\",\n /**\n * Bangla\n */\n BnMicrosoft = \"bn.microsoft\",\n /**\n * Basque\n */\n EuLucene = \"eu.lucene\",\n /**\n * Bulgarian\n */\n BgMicrosoft = \"bg.microsoft\",\n /**\n * Bulgarian\n */\n BgLucene = \"bg.lucene\",\n /**\n * Catalan\n */\n CaMicrosoft = \"ca.microsoft\",\n /**\n * Catalan\n */\n CaLucene = \"ca.lucene\",\n /**\n * Chinese Simplified\n */\n ZhHansMicrosoft = \"zh-Hans.microsoft\",\n /**\n * Chinese Simplified\n */\n ZhHansLucene = \"zh-Hans.lucene\",\n /**\n * Chinese Traditional\n */\n ZhHantMicrosoft = \"zh-Hant.microsoft\",\n /**\n * Chinese Traditional\n */\n ZhHantLucene = \"zh-Hant.lucene\",\n /**\n * Croatian\n */\n HrMicrosoft = \"hr.microsoft\",\n /**\n * Czech\n */\n CsMicrosoft = \"cs.microsoft\",\n /**\n * Czech\n */\n CsLucene = \"cs.lucene\",\n /**\n * Danish\n */\n DaMicrosoft = \"da.microsoft\",\n /**\n * Danish\n */\n DaLucene = \"da.lucene\",\n /**\n * Dutch\n */\n NlMicrosoft = \"nl.microsoft\",\n /**\n * Dutch\n */\n NlLucene = \"nl.lucene\",\n /**\n * English\n */\n EnMicrosoft = \"en.microsoft\",\n /**\n * English\n */\n EnLucene = \"en.lucene\",\n /**\n * Estonian\n */\n EtMicrosoft = \"et.microsoft\",\n /**\n * Finnish\n */\n FiMicrosoft = \"fi.microsoft\",\n /**\n * Finnish\n */\n FiLucene = \"fi.lucene\",\n /**\n * French\n */\n FrMicrosoft = \"fr.microsoft\",\n /**\n * French\n */\n FrLucene = \"fr.lucene\",\n /**\n * Galician\n */\n GlLucene = \"gl.lucene\",\n /**\n * German\n */\n DeMicrosoft = \"de.microsoft\",\n /**\n * German\n */\n DeLucene = \"de.lucene\",\n /**\n * Greek\n */\n ElMicrosoft = \"el.microsoft\",\n /**\n * Greek\n */\n ElLucene = \"el.lucene\",\n /**\n * Gujarati\n */\n GuMicrosoft = \"gu.microsoft\",\n /**\n * Hebrew\n */\n HeMicrosoft = \"he.microsoft\",\n /**\n * Hindi\n */\n HiMicrosoft = \"hi.microsoft\",\n /**\n * Hindi\n */\n HiLucene = \"hi.lucene\",\n /**\n * Hungarian\n */\n HuMicrosoft = \"hu.microsoft\",\n /**\n * Hungarian\n */\n HuLucene = \"hu.lucene\",\n /**\n * Icelandic\n */\n IsMicrosoft = \"is.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdMicrosoft = \"id.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdLucene = \"id.lucene\",\n /**\n * Irish\n */\n GaLucene = \"ga.lucene\",\n /**\n * Italian\n */\n ItMicrosoft = \"it.microsoft\",\n /**\n * Italian\n */\n ItLucene = \"it.lucene\",\n /**\n * Japanese\n */\n JaMicrosoft = \"ja.microsoft\",\n /**\n * Japanese\n */\n JaLucene = \"ja.lucene\",\n /**\n * Kannada\n */\n KnMicrosoft = \"kn.microsoft\",\n /**\n * Korean\n */\n KoMicrosoft = \"ko.microsoft\",\n /**\n * Korean\n */\n KoLucene = \"ko.lucene\",\n /**\n * Latvian\n */\n LvMicrosoft = \"lv.microsoft\",\n /**\n * Latvian\n */\n LvLucene = \"lv.lucene\",\n /**\n * Lithuanian\n */\n LtMicrosoft = \"lt.microsoft\",\n /**\n * Malayalam\n */\n MlMicrosoft = \"ml.microsoft\",\n /**\n * Malay (Latin)\n */\n MsMicrosoft = \"ms.microsoft\",\n /**\n * Marathi\n */\n MrMicrosoft = \"mr.microsoft\",\n /**\n * Norwegian\n */\n NbMicrosoft = \"nb.microsoft\",\n /**\n * Norwegian\n */\n NoLucene = \"no.lucene\",\n /**\n * 
Persian\n */\n FaLucene = \"fa.lucene\",\n /**\n * Polish\n */\n PlMicrosoft = \"pl.microsoft\",\n /**\n * Polish\n */\n PlLucene = \"pl.lucene\",\n /**\n * Portuguese (Brazil)\n */\n PtBRMicrosoft = \"pt-BR.microsoft\",\n /**\n * Portuguese (Brazil)\n */\n PtBRLucene = \"pt-BR.lucene\",\n /**\n * Portuguese (Portugal)\n */\n PtPTMicrosoft = \"pt-PT.microsoft\",\n /**\n * Portuguese (Portugal)\n */\n PtPTLucene = \"pt-PT.lucene\",\n /**\n * Punjabi\n */ PaMicrosoft = \"pa.microsoft\",\n /**\n * Romanian\n */\n RoMicrosoft = \"ro.microsoft\",\n /**\n * Romanian\n */\n RoLucene = \"ro.lucene\",\n /**\n * Russian\n */\n RuMicrosoft = \"ru.microsoft\",\n /**\n * Russian\n */\n RuLucene = \"ru.lucene\",\n /**\n * Serbian (Cyrillic)\n */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /**\n * Serbian (Latin)\n */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /**\n * Slovak\n */\n SkMicrosoft = \"sk.microsoft\",\n /**\n * Slovenian\n */\n SlMicrosoft = \"sl.microsoft\",\n /**\n * Spanish\n */\n EsMicrosoft = \"es.microsoft\",\n /**\n * Spanish\n */\n EsLucene = \"es.lucene\",\n /**\n * Swedish\n */\n SvMicrosoft = \"sv.microsoft\",\n /**\n * Swedish\n */\n SvLucene = \"sv.lucene\",\n /**\n * Tamil\n */\n TaMicrosoft = \"ta.microsoft\",\n /**\n * Telugu\n */\n TeMicrosoft = \"te.microsoft\",\n /**\n * Thai\n */\n ThMicrosoft = \"th.microsoft\",\n /**\n * Thai\n */\n ThLucene = \"th.lucene\",\n /**\n * Turkish\n */\n TrMicrosoft = \"tr.microsoft\",\n /**\n * Turkish\n */\n TrLucene = \"tr.lucene\",\n /**\n * Ukrainian\n */\n UkMicrosoft = \"uk.microsoft\",\n /**\n * Urdu\n */\n UrMicrosoft = \"ur.microsoft\",\n /**\n * Vietnamese\n */\n ViMicrosoft = \"vi.microsoft\",\n /**\n * See:\n * https://lucene.apache.org/core/6_6_1/core/org/apache/lucene/analysis/standard/StandardAnalyzer.html\n */\n StandardLucene = \"standard.lucene\",\n /**\n * See\n * https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /**\n * Treats the entire content of a field as a single token. 
This is useful for data like zip codes,\n * ids, and some product names.\n */\n Keyword = \"keyword\",\n /**\n * Flexibly separates text into terms via a regular expression pattern.\n */\n Pattern = \"pattern\",\n /**\n * Divides text at non-letters and converts them to lower case.\n */\n Simple = \"simple\",\n /**\n * Divides text at non-letters; Applies the lowercase and stopword token filters.\n */\n Stop = \"stop\",\n /**\n * An analyzer that uses the whitespace tokenizer.\n */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Contains the possible cases for DataChangeDetectionPolicy.\n */\nexport type DataChangeDetectionPolicy =\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\n\n/**\n * Contains the possible cases for SearchIndexerDataIdentity.\n */\nexport type SearchIndexerDataIdentity =\n | SearchIndexerDataNoneIdentity\n | SearchIndexerDataUserAssignedIdentity;\n\n/**\n * Contains the possible cases for DataDeletionDetectionPolicy.\n */\nexport type DataDeletionDetectionPolicy =\n | SoftDeleteColumnDeletionDetectionPolicy\n | NativeBlobSoftDeleteDeletionDetectionPolicy;\n\n/**\n * Represents a datasource definition, which can be used to configure an indexer.\n */\nexport interface SearchIndexerDataSourceConnection {\n /**\n * The name of the datasource.\n */\n name: string;\n /**\n * The description of the datasource.\n */\n description?: string;\n /**\n * The type of the datasource. Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob',\n * 'AzureTable', 'MySql', 'AdlsGen2'\n */\n type: SearchIndexerDataSourceType;\n /**\n * A specific type of the data source, in case the resource is capable of different modalities. For example, 'MongoDb' for certain 'cosmosDb' accounts.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly subType?: string;\n /**\n * The connection string for the datasource.\n */\n connectionString?: string;\n /**\n * The data container for the datasource.\n */\n container: SearchIndexerDataContainer;\n /**\n * An explicit managed identity to use for this datasource. If not specified and the connection\n * string is a managed identity, the system-assigned managed identity is used. If not specified,\n * the value remains unchanged. If \"none\" is specified, the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n /**\n * Ingestion options with various types of permission data.\n */\n indexerPermissionOptions?: IndexerPermissionOption[];\n /**\n * The data change detection policy for the datasource.\n */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicy;\n /**\n * The data deletion detection policy for the datasource.\n */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy;\n /**\n * The ETag of the DataSource.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your datasource definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your data source definition in\n * Azure AI Search. Once you have encrypted your data source definition, it will always\n * remain encrypted. Azure AI Search will ignore attempts to set this property to null.\n * You can change this property as needed if you want to rotate your encryption key; Your\n * datasource definition will be unaffected. 
Encryption with customer-managed keys is not\n * available for free search services, and is only available for paid services created on or\n * after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/**\n * Contains configuration options related to vector search.\n */\nexport interface VectorSearch {\n /**\n * Defines combinations of configurations to use with vector search.\n */\n profiles?: VectorSearchProfile[];\n /**\n * Contains configuration options specific to the algorithm used during indexing and/or querying.\n */\n algorithms?: VectorSearchAlgorithmConfiguration[];\n /**\n * Contains configuration options on how to vectorize text vector queries.\n */\n vectorizers?: VectorSearchVectorizer[];\n /**\n * Contains configuration options specific to the compression method used during indexing or\n * querying.\n */\n compressions?: VectorSearchCompression[];\n}\n\n/**\n * Contains configuration options specific to the algorithm used during indexing and/or querying.\n */\nexport type VectorSearchAlgorithmConfiguration =\n | HnswAlgorithmConfiguration\n | ExhaustiveKnnAlgorithmConfiguration;\n\n/**\n * Contains configuration options specific to the algorithm used during indexing and/or querying.\n */\nexport interface BaseVectorSearchAlgorithmConfiguration {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: VectorSearchAlgorithmKind;\n /**\n * The name to associate with this particular configuration.\n */\n name: string;\n}\n\n/**\n * Contains configuration options specific to the hnsw approximate nearest neighbors algorithm\n * used during indexing time.\n */\nexport type HnswAlgorithmConfiguration = BaseVectorSearchAlgorithmConfiguration & {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"hnsw\";\n /**\n * Contains the parameters specific to hnsw algorithm.\n *\n */\n parameters?: HnswParameters;\n};\n\n/**\n * Contains the parameters specific to hnsw algorithm.\n */\nexport interface HnswParameters {\n /**\n * The number of bi-directional links created for every new element during construction.\n * Increasing this parameter value may improve recall and reduce retrieval times for datasets\n * with high intrinsic dimensionality at the expense of increased memory consumption and longer\n * indexing time.\n */\n m?: number;\n /**\n * The size of the dynamic list containing the nearest neighbors, which is used during index\n * time. Increasing this parameter may improve index quality, at the expense of increased\n * indexing time. At a certain point, increasing this parameter leads to diminishing returns.\n */\n efConstruction?: number;\n /**\n * The size of the dynamic list containing the nearest neighbors, which is used during search\n * time. 
Increasing this parameter may improve search results, at the expense of slower search.\n * Increasing this parameter leads to diminishing returns.\n */\n efSearch?: number;\n /**\n * The similarity metric to use for vector comparisons.\n */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/**\n * Contains configuration options specific to the exhaustive KNN algorithm used during querying,\n * which will perform brute-force search across the entire vector index.\n */\nexport type ExhaustiveKnnAlgorithmConfiguration = BaseVectorSearchAlgorithmConfiguration & {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"exhaustiveKnn\";\n /**\n * Contains the parameters specific to exhaustive KNN algorithm.\n */\n parameters?: ExhaustiveKnnParameters;\n};\n\n/**\n * Contains the parameters specific to exhaustive KNN algorithm.\n */\nexport interface ExhaustiveKnnParameters {\n /**\n * The similarity metric to use for vector comparisons.\n */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/**\n * A dictionary of index projection-specific configuration properties. Each name is the name of a\n * specific property. Each value must be of a primitive type.\n */\nexport interface SearchIndexerIndexProjectionParameters {\n /**\n * Describes unknown properties.\n */\n [property: string]: unknown;\n /**\n * Defines behavior of the index projections in relation to the rest of the indexer.\n */\n projectionMode?: IndexProjectionMode;\n}\n\n/**\n * Definition of additional projections to secondary search indexes.\n */\nexport interface SearchIndexerIndexProjection {\n /**\n * A list of projections to be performed to secondary search indexes.\n */\n selectors: SearchIndexerIndexProjectionSelector[];\n /**\n * A dictionary of index projection-specific configuration properties. Each name is the name of a\n * specific property. 
Each value must be of a primitive type.\n */\n parameters?: SearchIndexerIndexProjectionParameters;\n}\n\n/**\n * Contains specific details for a vectorization method to be used during query time.\n */\nexport interface BaseVectorSearchVectorizer {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: VectorSearchVectorizerKind;\n /**\n * The name to associate with this particular vectorization method.\n */\n vectorizerName: string;\n}\n\n/**\n * Contains the parameters specific to using an Azure Open AI service for vectorization at query\n * time.\n */\nexport interface AzureOpenAIVectorizer extends BaseVectorSearchVectorizer {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"azureOpenAI\";\n /**\n * Contains the parameters specific to Azure Open AI embedding vectorization.\n */\n parameters?: AzureOpenAIParameters;\n}\n\n/**\n * Specifies a user-defined vectorizer for generating the vector embedding of a query string.\n * Integration of an external vectorizer is achieved using the custom Web API interface of a\n * skillset.\n */\nexport interface WebApiVectorizer extends BaseVectorSearchVectorizer {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"customWebApi\";\n /**\n * Specifies the properties of the user-defined vectorizer.\n */\n parameters?: WebApiParameters;\n}\n\n/**\n * Specifies the properties for connecting to a user-defined vectorizer.\n */\nexport interface WebApiParameters {\n /**\n * The URI of the Web API providing the vectorizer.\n */\n uri?: string;\n /**\n * The headers required to make the HTTP request.\n */\n httpHeaders?: { [propertyName: string]: string };\n /**\n * The method for the HTTP request.\n */\n httpMethod?: string;\n /**\n * The desired timeout for the request. Default is 30 seconds.\n */\n timeout?: string;\n /**\n * Applies to custom endpoints that connect to external code in an Azure function or some other\n * application that provides the transformations. This value should be the application ID created\n * for the function or app when it was registered with Azure Active Directory. When specified, the\n * vectorization connects to the function or app using a managed ID (either system or\n * user-assigned) of the search service and the access token of the function or app, using this\n * value as the resource id for creating the scope of the access token.\n */\n authResourceId?: string;\n /**\n * The user-assigned managed identity used for outbound connections. If an authResourceId is\n * provided and it's not specified, the system-assigned managed identity is used. On updates to\n * the indexer, if the identity is unspecified, the value remains unchanged. 
If set to \"none\", the\n * value of this property is cleared.\n */\n authIdentity?: SearchIndexerDataIdentity;\n}\n\n/**\n * Contains configuration options on how to vectorize text vector queries.\n */\nexport type VectorSearchVectorizer =\n | AIServicesVisionVectorizer\n | AzureMachineLearningVectorizer\n | AzureOpenAIVectorizer\n | WebApiVectorizer;\n\n/**\n * Specifies the AI Services Vision parameters for vectorizing a query image or text.\n */\nexport interface AIServicesVisionVectorizer extends BaseVectorSearchVectorizer {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"aiServicesVision\";\n /**\n * Contains the parameters specific to AI Services Vision embedding vectorization.\n */\n parameters?: AIServicesVisionParameters;\n}\n\n/**\n * Specifies the AI Services Vision parameters for vectorizing a query image or text.\n */\nexport interface AIServicesVisionParameters {\n /**\n * The version of the model to use when calling the AI Services Vision service. It will default to\n * the latest available when not specified.\n */\n modelVersion?: string;\n /**\n * The resource URI of the AI Services resource.\n */\n resourceUri: string;\n /**\n * API key of the designated AI Services resource.\n */\n apiKey?: string;\n /**\n * The user-assigned managed identity used for outbound connections. If an authResourceId is\n * provided and it's not specified, the system-assigned managed identity is used. On updates to\n * the index, if the identity is unspecified, the value remains unchanged. If set to \"none\", the\n * value of this property is cleared.\n */\n authIdentity?: SearchIndexerDataIdentity;\n}\n\n/**\n * Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for\n * generating the vector embedding of a query string.\n */\nexport interface AzureMachineLearningVectorizer extends BaseVectorSearchVectorizer {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"aml\";\n /**\n * Specifies the properties of the AML vectorizer.\n */\n amlParameters?: AzureMachineLearningVectorizerParameters;\n}\n\n/**\n * Specifies the properties for connecting to an AML vectorizer.\n */\nexport type AzureMachineLearningVectorizerParameters =\n | NoAuthAzureMachineLearningVectorizerParameters\n | KeyAuthAzureMachineLearningVectorizerParameters\n | TokenAuthAzureMachineLearningVectorizerParameters;\n\n/**\n * Specifies the properties common between all AML vectorizer auth types.\n */\nexport interface BaseAzureMachineLearningVectorizerParameters {\n /**\n * When specified, indicates the timeout for the http client making the API call.\n */\n timeout?: string;\n /**\n * The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the\n * provided endpoint.\n */\n modelName?: AIFoundryModelCatalogName;\n}\n\n/**\n * Specifies the properties for connecting to an AML vectorizer with no authentication.\n */\nexport interface NoAuthAzureMachineLearningVectorizerParameters\n extends BaseAzureMachineLearningVectorizerParameters {\n /**\n * Indicates how the service should attempt to identify itself to the AML instance\n */\n authKind: \"none\";\n /**\n * The scoring URI of the AML service to which the JSON payload will be sent. 
Only the https URI\n * scheme is allowed.\n */\n scoringUri: string;\n}\n\n/**\n * Specifies the properties for connecting to an AML vectorizer with an authentication key.\n */\nexport interface KeyAuthAzureMachineLearningVectorizerParameters\n extends BaseAzureMachineLearningVectorizerParameters {\n /**\n * Indicates how the service should attempt to identify itself to the AML instance\n */\n authKind: \"key\";\n /**\n * The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI\n * scheme is allowed.\n */\n scoringUri: string;\n /**\n * The key for the AML service.\n */\n authenticationKey: string;\n}\n\n/**\n * Specifies the properties for connecting to an AML vectorizer with a managed identity.\n */\nexport interface TokenAuthAzureMachineLearningVectorizerParameters\n extends BaseAzureMachineLearningVectorizerParameters {\n /**\n * Indicates how the service should attempt to identify itself to the AML instance\n */\n authKind: \"token\";\n /**\n * The Azure Resource Manager resource ID of the AML service. It should be in the format\n * subscriptions/\\{guid\\}/resourceGroups/\\{resource-group-name\\}/Microsoft.MachineLearningServices/workspaces/\\{workspace-name\\}/services/\\{service_name\\}.\n */\n resourceId: string;\n /**\n * The region the AML service is deployed in.\n */\n region?: string;\n}\n\n/**\n * Specifies the parameters for connecting to the Azure OpenAI resource.\n */\nexport interface AzureOpenAIParameters {\n /**\n * The resource URI of the Azure OpenAI resource.\n */\n resourceUrl?: string;\n /**\n * ID of the Azure OpenAI model deployment on the designated resource.\n */\n deploymentId?: string;\n /**\n * API key of the designated Azure OpenAI resource.\n */\n apiKey?: string;\n /**\n * The user-assigned managed identity used for outbound connections.\n */\n authIdentity?: SearchIndexerDataIdentity;\n /**\n * The name of the embedding model that is deployed at the provided deploymentId path.\n */\n modelName?: AzureOpenAIModelName;\n}\n\n/**\n * Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource.\n */\nexport interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill, AzureOpenAIParameters {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\";\n /**\n * The number of dimensions the resulting output embeddings should have. Only supported in\n * text-embedding-3 and later models.\n */\n dimensions?: number;\n}\n\n/**\n * A dictionary of knowledge store-specific configuration properties. Each name is the name of a\n * specific property. Each value must be of a primitive type.\n */\nexport interface SearchIndexerKnowledgeStoreParameters {\n /**\n * Describes unknown properties. The value of an unknown property can be of \"any\" type.\n */\n [property: string]: unknown;\n /**\n * Whether or not projections should synthesize a generated key name if one isn't already present.\n */\n synthesizeGeneratedKeyName?: boolean;\n}\n\n/**\n * A dictionary of indexer-specific configuration properties. Each name is the name of a specific\n * property. Each value must be of a primitive type.\n */\nexport interface IndexingParametersConfiguration {\n /**\n * Describes unknown properties. 
The value of an unknown property can be of \"any\" type.\n */\n [property: string]: any;\n /**\n * Represents the parsing mode for indexing from an Azure blob data source.\n */\n parsingMode?: BlobIndexerParsingMode;\n /**\n * Comma-delimited list of filename extensions to ignore when processing from Azure blob storage.\n * For example, you could exclude \".png, .mp4\" to skip over those files during indexing.\n */\n excludedFileNameExtensions?: string;\n /**\n * Comma-delimited list of filename extensions to select when processing from Azure blob storage.\n * For example, you could focus indexing on specific application files \".docx, .pptx, .msg\" to\n * specifically include those file types.\n */\n indexedFileNameExtensions?: string;\n /**\n * For Azure blobs, set to false if you want to continue indexing when an unsupported content type\n * is encountered, and you don't know all the content types (file extensions) in advance.\n */\n failOnUnsupportedContentType?: boolean;\n /**\n * For Azure blobs, set to false if you want to continue indexing if a document fails indexing.\n */\n failOnUnprocessableDocument?: boolean;\n /**\n * For Azure blobs, set this property to true to still index storage metadata for blob content\n * that is too large to process. Oversized blobs are treated as errors by default. For limits on\n * blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity.\n */\n indexStorageMetadataOnlyForOversizedDocuments?: boolean;\n /**\n * For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source\n * fields to destination fields in an index.\n */\n delimitedTextHeaders?: string;\n /**\n * For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each\n * line starts a new document (for example, \"|\").\n */\n delimitedTextDelimiter?: string;\n /**\n * For CSV blobs, indicates that the first (non-blank) line of each blob contains headers.\n */\n firstLineContainsHeaders?: boolean;\n /**\n * Specifies the submode that will determine whether a markdown file will be parsed into exactly\n * one search document or multiple search documents. Default is `oneToMany`.\n */\n markdownParsingSubmode?: MarkdownParsingSubmode;\n /**\n * Specifies the max header depth that will be considered while grouping markdown content. Default\n * is `h6`.\n */\n markdownHeaderDepth?: MarkdownHeaderDepth;\n /**\n * For JSON arrays, given a structured or semi-structured document, you can specify a path to the\n * array using this property.\n */\n documentRoot?: string;\n /**\n * Specifies the data to extract from Azure blob storage and tells the indexer which data to\n * extract from image content when \"imageAction\" is set to a value other than \"none\". This\n * applies to embedded image content in a .PDF or other application, or image files such as .jpg\n * and .png, in Azure blobs.\n */\n dataToExtract?: BlobIndexerDataToExtract;\n /**\n * Determines how to process embedded images and image files in Azure blob storage. Setting the\n * \"imageAction\" configuration to any value other than \"none\" requires that a skillset also be\n * attached to that indexer.\n */\n imageAction?: BlobIndexerImageAction;\n /**\n * If true, will create a path //document//file_data that is an object representing the original\n * file data downloaded from your blob data source. 
This allows you to pass the original file\n * data to a custom skill for processing within the enrichment pipeline, or to the Document\n * Extraction skill.\n */\n allowSkillsetToReadFileData?: boolean;\n /**\n * Determines algorithm for text extraction from PDF files in Azure blob storage.\n */\n pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;\n /**\n * Specifies the environment in which the indexer should execute.\n */\n executionEnvironment?: IndexerExecutionEnvironment;\n /**\n * Increases the timeout beyond the 5-minute default for Azure SQL database data sources,\n * specified in the format \"hh:mm:ss\".\n */\n queryTimeout?: string;\n}\n\n/**\n * Represents parameters for indexer execution.\n */\nexport interface IndexingParameters {\n /**\n * The number of items that are read from the data source and indexed as a single batch in order\n * to improve performance. The default depends on the data source type.\n */\n batchSize?: number;\n /**\n * The maximum number of items that can fail indexing for indexer execution to still be considered\n * successful. -1 means no limit. Default is 0.\n */\n maxFailedItems?: number;\n /**\n * The maximum number of items in a single batch that can fail indexing for the batch to still be\n * considered successful. -1 means no limit. Default is 0.\n */\n maxFailedItemsPerBatch?: number;\n /**\n * A dictionary of indexer-specific configuration properties. Each name is the name of a specific\n * property. Each value must be of a primitive type.\n */\n configuration?: IndexingParametersConfiguration;\n}\n\n/**\n * A skill looks for text from a custom, user-defined list of words and phrases.\n */\nexport interface CustomEntityLookupSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.CustomEntityLookupSkill\";\n /**\n * A value indicating which language code to use. Default is en.\n */\n defaultLanguageCode?: CustomEntityLookupSkillLanguage;\n /**\n * Path to a JSON or CSV file containing all the target text to match against. This entity\n * definition is read at the beginning of an indexer run. Any updates to this file during an\n * indexer run will not take effect until subsequent runs. This config must be accessible over\n * HTTPS.\n */\n entitiesDefinitionUri?: string;\n /**\n * The inline CustomEntity definition.\n */\n inlineEntitiesDefinition?: CustomEntity[];\n /**\n * A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will\n * be the default value.\n */\n globalDefaultCaseSensitive?: boolean;\n /**\n * A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value\n * will be the default value.\n */\n globalDefaultAccentSensitive?: boolean;\n /**\n * A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this\n * value will be the default value.\n */\n globalDefaultFuzzyEditDistance?: number;\n}\n\n/**\n * Text analytics entity recognition.\n *\n * @deprecated This skill has been deprecated.\n */\nexport interface EntityRecognitionSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.EntityRecognitionSkill\";\n /**\n * A list of entity categories that should be extracted.\n */\n categories?: EntityCategory[];\n /**\n * A value indicating which language code to use. 
Default is en.\n */\n defaultLanguageCode?: EntityRecognitionSkillLanguage;\n /**\n * Determines whether or not to include entities which are well known but don't conform to a\n * pre-defined type. If this configuration is not set (default), set to null or set to false,\n * entities which don't conform to one of the pre-defined types will not be surfaced.\n */\n includeTypelessEntities?: boolean;\n /**\n * A value between 0 and 1 that can be used to only include entities whose confidence score is greater\n * than the value specified. If not set (default), or if explicitly set to null, all entities will\n * be included.\n */\n minimumPrecision?: number;\n}\n\n/**\n * A skill that analyzes image files. It extracts a rich set of visual features based on the image\n * content.\n */\nexport interface ImageAnalysisSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Vision.ImageAnalysisSkill\";\n /**\n * A value indicating which language code to use. Default is en.\n */\n defaultLanguageCode?: ImageAnalysisSkillLanguage;\n /**\n * A list of visual features.\n */\n visualFeatures?: VisualFeature[];\n /**\n * A string indicating which domain-specific details to return.\n */\n details?: ImageDetail[];\n}\n\n/**\n * A skill that uses text analytics for key phrase extraction.\n */\nexport interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\";\n /**\n * A value indicating which language code to use. Default is en.\n */\n defaultLanguageCode?: KeyPhraseExtractionSkillLanguage;\n /**\n * A number indicating how many key phrases to return. If absent, all identified key phrases will\n * be returned.\n */\n maxKeyPhraseCount?: number;\n /**\n * The version of the model to use when calling the Text Analytics service. It will default to the\n * latest available when not specified. We recommend you do not specify this value unless\n * absolutely necessary.\n */\n modelVersion?: string;\n}\n\n/**\n * A skill that extracts text from image files.\n */\nexport interface OcrSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Vision.OcrSkill\";\n /**\n * A value indicating which language code to use. Default is `en`.\n */\n defaultLanguageCode?: OcrSkillLanguage;\n /**\n * A value indicating whether to turn on orientation detection. Default is false.\n */\n shouldDetectOrientation?: boolean;\n /**\n * Defines the sequence of characters to use between the lines of text recognized by the OCR\n * skill. The default value is \"space\".\n */\n lineEnding?: OcrLineEnding;\n}\n\n/**\n * Using the Text Analytics API, extracts personal information from an input text and gives you the\n * option of masking it.\n */\nexport interface PIIDetectionSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.PIIDetectionSkill\";\n /**\n * A value indicating which language code to use. Default is en.\n */\n defaultLanguageCode?: string;\n /**\n * A value between 0 and 1 that can be used to only include entities whose confidence score is greater\n * than the value specified. 
If not set (default), or if explicitly set to null, all entities will\n * be included.\n */\n minimumPrecision?: number;\n /**\n * A parameter that provides various ways to mask the personal information detected in the input\n * text. Default is 'none'.\n */\n maskingMode?: PIIDetectionSkillMaskingMode;\n /**\n * The character used to mask the text if the maskingMode parameter is set to replace. Default is\n * '*'.\n */\n maskingCharacter?: string;\n /**\n * The version of the model to use when calling the Text Analytics service. It will default to the\n * latest available when not specified. We recommend you do not specify this value unless\n * absolutely necessary.\n */\n modelVersion?: string;\n /**\n * A list of PII entity categories that should be extracted and masked.\n */\n categories?: string[];\n /**\n * If specified, will set the PII domain to include only a subset of the entity categories.\n * Possible values include: 'phi', 'none'. Default is 'none'.\n */\n domain?: string;\n}\n\n/**\n * Text analytics positive-negative sentiment analysis, scored as a floating point value in a range\n * of zero to 1.\n *\n * @deprecated This skill has been deprecated.\n */\nexport interface SentimentSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.SentimentSkill\";\n /**\n * A value indicating which language code to use. Default is en.\n */\n defaultLanguageCode?: SentimentSkillLanguage;\n}\n\n/**\n * A skill to split a string into chunks of text.\n */\nexport interface SplitSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.SplitSkill\";\n /**\n * A value indicating which language code to use. Default is `en`.\n */\n defaultLanguageCode?: SplitSkillLanguage;\n /**\n * A value indicating which split mode to perform.\n */\n textSplitMode?: TextSplitMode;\n /**\n * The desired maximum page length. Default is 10000.\n */\n maxPageLength?: number;\n /**\n * Only applicable when textSplitMode is set to 'pages'. If specified, n+1th chunk will start with\n * this number of characters/tokens from the end of the nth chunk.\n */\n pageOverlapLength?: number;\n /**\n * Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will\n * discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to\n * improve performance when only a few initial pages are needed from each document.\n */\n maximumPagesToTake?: number;\n /**\n * Only applies if textSplitMode is set to pages. There are two possible values. The choice of the\n * values will decide the length (maximumPageLength and pageOverlapLength) measurement. The\n * default is 'characters', which means the length will be measured by character.\n */\n unit?: SplitSkillUnit;\n /**\n * Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use\n * these parameters when performing the tokenization. 
The parameters are a valid\n * 'encoderModelName' and an optional 'allowedSpecialTokens' property.\n */\n azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters;\n}\n\n/**\n * A skill to translate text from one language to another.\n */\nexport interface TextTranslationSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Text.TranslationSkill\";\n /**\n * The language code to translate documents into for documents that don't specify the to language\n * explicitly.\n */\n defaultToLanguageCode: TextTranslationSkillLanguage;\n /**\n * The language code to translate documents from for documents that don't specify the from\n * language explicitly.\n */\n defaultFromLanguageCode?: TextTranslationSkillLanguage;\n /**\n * The language code to translate documents from when neither the fromLanguageCode input nor the\n * defaultFromLanguageCode parameter are provided, and the automatic language detection is\n * unsuccessful. Default is en.\n */\n suggestedFrom?: TextTranslationSkillLanguage;\n}\n\nexport type KnowledgeSource =\n | BaseKnowledgeSource\n | SearchIndexKnowledgeSource\n | AzureBlobKnowledgeSource;\n\n/**\n * Represents a knowledge source definition.\n */\nexport interface BaseKnowledgeSource {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"searchIndex\" | \"azureBlob\";\n /**\n * The name of the knowledge source.\n */\n name: string;\n /**\n * Optional user-defined description.\n */\n description?: string;\n /**\n * The ETag of the knowledge source.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your knowledge source definition when you want full assurance that no one, not even Microsoft, can decrypt it. Once you have encrypted your knowledge source definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your knowledge source definition will be unaffected. 
Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/**\n * Knowledge Source targeting a search index.\n */\nexport interface SearchIndexKnowledgeSource extends BaseKnowledgeSource {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"searchIndex\";\n /**\n * The parameters for the knowledge source.\n */\n searchIndexParameters: SearchIndexKnowledgeSourceParameters;\n}\n\n/**\n * Configuration for Azure Blob Storage knowledge source.\n */\nexport interface AzureBlobKnowledgeSource extends BaseKnowledgeSource {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"azureBlob\";\n /**\n * The parameters for the knowledge source.\n */\n azureBlobParameters: AzureBlobKnowledgeSourceParameters;\n}\n\n/**\n * Parameters for Azure Blob Storage knowledge source.\n */\nexport interface AzureBlobKnowledgeSourceParameters {\n /**\n * An explicit identity to use for this knowledge source.\n */\n identity?: SearchIndexerDataIdentity;\n /**\n * Key-based connection string or the ResourceId format if using a managed identity.\n */\n connectionString: string;\n /**\n * The name of the blob storage container.\n */\n containerName: string;\n /**\n * Optional folder path within the container.\n */\n folderPath?: string;\n /**\n * Optional vectorizer configuration for vectorizing content.\n */\n embeddingModel?: VectorSearchVectorizer;\n /**\n * Optional chat completion model for image verbalization or context extraction.\n */\n chatCompletionModel?: KnowledgeAgentModel;\n /**\n * Optional schedule for data ingestion.\n */\n ingestionSchedule?: IndexingSchedule;\n /**\n * Resources created by the knowledge source.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly createdResources?: { [propertyName: string]: string };\n /**\n * Indicates whether image verbalization should be disabled.\n */\n disableImageVerbalization?: boolean;\n}\n\nexport type KnowledgeAgentModel = KnowledgeAgentAzureOpenAIModel;\n\nexport interface KnowledgeAgentAzureOpenAIModel extends BaseKnowledgeAgentModel {\n azureOpenAIParameters: AzureOpenAIParameters;\n kind: \"azureOpenAI\";\n}\n\n/**\n * Contains configuration options specific to the compression method used during indexing or querying.\n */\nexport type VectorSearchCompression = BinaryQuantizationCompression | ScalarQuantizationCompression;\n\nexport interface GetIndexStatsSummaryOptions extends OperationOptions {}\n\nexport interface CreateOrUpdateKnowledgeAgentOptions extends OperationOptions {\n /**\n * If set to true, the resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\nexport interface DeleteKnowledgeAgentOptions extends OperationOptions {\n /**\n * If set to true, the resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\nexport interface GetKnowledgeAgentOptions extends OperationOptions {}\nexport interface ListKnowledgeAgentsOptions extends OperationOptions {}\nexport interface CreateKnowledgeAgentOptions extends OperationOptions {}\n\nexport interface CreateOrUpdateKnowledgeSourceOptions extends OperationOptions {\n /**\n * If set to true, the resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\nexport interface DeleteKnowledgeSourceOptions extends OperationOptions {\n /**\n * If set to true, the resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\nexport interface GetKnowledgeSourceOptions extends OperationOptions {}\nexport interface ListKnowledgeSourcesOptions extends OperationOptions {}\nexport interface CreateKnowledgeSourceOptions extends OperationOptions {}\n\n/**\n * Defines values for LexicalAnalyzerName.\n * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **ar.microsoft**: Microsoft analyzer for Arabic.\n * **ar.lucene**: Lucene analyzer for Arabic.\n * **hy.lucene**: Lucene analyzer for Armenian.\n * **bn.microsoft**: Microsoft analyzer for Bangla.\n * **eu.lucene**: Lucene analyzer for Basque.\n * **bg.microsoft**: Microsoft analyzer for Bulgarian.\n * **bg.lucene**: Lucene analyzer for Bulgarian.\n * **ca.microsoft**: Microsoft analyzer for Catalan.\n * **ca.lucene**: Lucene analyzer for Catalan.\n * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified).\n * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified).\n * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional).\n * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional).\n * **hr.microsoft**: Microsoft analyzer for Croatian.\n * **cs.microsoft**: Microsoft analyzer for Czech.\n * **cs.lucene**: Lucene analyzer for Czech.\n * **da.microsoft**: Microsoft analyzer for Danish.\n * **da.lucene**: Lucene analyzer for Danish.\n * **nl.microsoft**: Microsoft analyzer for Dutch.\n * **nl.lucene**: Lucene analyzer for Dutch.\n * **en.microsoft**: Microsoft analyzer for English.\n * **en.lucene**: Lucene analyzer for English.\n * **et.microsoft**: Microsoft analyzer for Estonian.\n * **fi.microsoft**: Microsoft analyzer for Finnish.\n * **fi.lucene**: Lucene analyzer for 
Finnish.\n * **fr.microsoft**: Microsoft analyzer for French.\n * **fr.lucene**: Lucene analyzer for French.\n * **gl.lucene**: Lucene analyzer for Galician.\n * **de.microsoft**: Microsoft analyzer for German.\n * **de.lucene**: Lucene analyzer for German.\n * **el.microsoft**: Microsoft analyzer for Greek.\n * **el.lucene**: Lucene analyzer for Greek.\n * **gu.microsoft**: Microsoft analyzer for Gujarati.\n * **he.microsoft**: Microsoft analyzer for Hebrew.\n * **hi.microsoft**: Microsoft analyzer for Hindi.\n * **hi.lucene**: Lucene analyzer for Hindi.\n * **hu.microsoft**: Microsoft analyzer for Hungarian.\n * **hu.lucene**: Lucene analyzer for Hungarian.\n * **is.microsoft**: Microsoft analyzer for Icelandic.\n * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa).\n * **id.lucene**: Lucene analyzer for Indonesian.\n * **ga.lucene**: Lucene analyzer for Irish.\n * **it.microsoft**: Microsoft analyzer for Italian.\n * **it.lucene**: Lucene analyzer for Italian.\n * **ja.microsoft**: Microsoft analyzer for Japanese.\n * **ja.lucene**: Lucene analyzer for Japanese.\n * **kn.microsoft**: Microsoft analyzer for Kannada.\n * **ko.microsoft**: Microsoft analyzer for Korean.\n * **ko.lucene**: Lucene analyzer for Korean.\n * **lv.microsoft**: Microsoft analyzer for Latvian.\n * **lv.lucene**: Lucene analyzer for Latvian.\n * **lt.microsoft**: Microsoft analyzer for Lithuanian.\n * **ml.microsoft**: Microsoft analyzer for Malayalam.\n * **ms.microsoft**: Microsoft analyzer for Malay (Latin).\n * **mr.microsoft**: Microsoft analyzer for Marathi.\n * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål).\n * **no.lucene**: Lucene analyzer for Norwegian.\n * **fa.lucene**: Lucene analyzer for Persian.\n * **pl.microsoft**: Microsoft analyzer for Polish.\n * **pl.lucene**: Lucene analyzer for Polish.\n * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil).\n * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil).\n * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal).\n * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal).\n * **pa.microsoft**: Microsoft analyzer for Punjabi.\n * **ro.microsoft**: Microsoft analyzer for Romanian.\n * **ro.lucene**: Lucene analyzer for Romanian.\n * **ru.microsoft**: Microsoft analyzer for Russian.\n * **ru.lucene**: Lucene analyzer for Russian.\n * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic).\n * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin).\n * **sk.microsoft**: Microsoft analyzer for Slovak.\n * **sl.microsoft**: Microsoft analyzer for Slovenian.\n * **es.microsoft**: Microsoft analyzer for Spanish.\n * **es.lucene**: Lucene analyzer for Spanish.\n * **sv.microsoft**: Microsoft analyzer for Swedish.\n * **sv.lucene**: Lucene analyzer for Swedish.\n * **ta.microsoft**: Microsoft analyzer for Tamil.\n * **te.microsoft**: Microsoft analyzer for Telugu.\n * **th.microsoft**: Microsoft analyzer for Thai.\n * **th.lucene**: Lucene analyzer for Thai.\n * **tr.microsoft**: Microsoft analyzer for Turkish.\n * **tr.lucene**: Lucene analyzer for Turkish.\n * **uk.microsoft**: Microsoft analyzer for Ukrainian.\n * **ur.microsoft**: Microsoft analyzer for Urdu.\n * **vi.microsoft**: Microsoft analyzer for Vietnamese.\n * **standard.lucene**: Standard Lucene analyzer.\n * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. 
See https:\\/\\/learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#Analyzers\n * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordAnalyzer.html\n * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/PatternAnalyzer.html\n * **simple**: Divides text at non-letters and converts them to lower case. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/SimpleAnalyzer.html\n * **stop**: Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopAnalyzer.html\n * **whitespace**: An analyzer that uses the whitespace tokenizer. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceAnalyzer.html\n */\nexport type AnalyzerNames = `${KnownLexicalAnalyzerName}`;\nexport type BlobIndexerDataToExtract = `${KnownBlobIndexerDataToExtract}`;\nexport type BlobIndexerImageAction = `${KnownBlobIndexerImageAction}`;\nexport type BlobIndexerParsingMode = `${KnownBlobIndexerParsingMode}`;\nexport type BlobIndexerPDFTextRotationAlgorithm = `${KnownBlobIndexerPDFTextRotationAlgorithm}`;\nexport type CharFilterNames = `${KnownCharFilterName}`;\nexport type CustomEntityLookupSkillLanguage = `${KnownCustomEntityLookupSkillLanguage}`;\nexport type EntityCategory = `${KnownEntityCategory}`;\nexport type EntityRecognitionSkillLanguage = `${KnownEntityRecognitionSkillLanguage}`;\nexport type ImageAnalysisSkillLanguage = `${KnownImageAnalysisSkillLanguage}`;\nexport type ImageDetail = `${KnownImageDetail}`;\nexport type IndexerExecutionEnvironment = `${KnownIndexerExecutionEnvironment}`;\nexport type KeyPhraseExtractionSkillLanguage = `${KnownKeyPhraseExtractionSkillLanguage}`;\nexport type OcrSkillLanguage = `${KnownOcrSkillLanguage}`;\nexport type PIIDetectionSkillMaskingMode = `${KnownPIIDetectionSkillMaskingMode}`;\nexport type RegexFlags = `${KnownRegexFlags}`;\n/**\n * Defines values for SearchFieldDataType.\n *\n * ### Known values supported by the service:\n *\n * **Edm.String**: Indicates that a field contains a string.\n *\n * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer.\n *\n * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer.\n *\n * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number.\n *\n * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false).\n *\n * **Edm.DateTimeOffset**: Indicates that a field contains a date/time value, including timezone\n * information.\n *\n * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and\n * latitude.\n *\n * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn\n * have sub-fields of other types.\n *\n * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is\n * only valid when used as part of a collection type, i.e. 
Collection(Edm.Single).\n *\n * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is\n * only valid when used as part of a collection type, i.e. Collection(Edm.Half).\n *\n * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when\n * used as part of a collection type, i.e. Collection(Edm.Int16).\n *\n * **Edm.SByte**: Indicates that a field contains an 8-bit signed integer. This is only valid when\n * used as part of a collection type, i.e. Collection(Edm.SByte).\n *\n * **Edm.Byte**: Indicates that a field contains an 8-bit unsigned integer. This is only valid when\n * used as part of a collection type, i.e. Collection(Edm.Byte).\n */\nexport type SearchFieldDataType = Exclude<\n `${KnownSearchFieldDataType}` | `Collection(${KnownSearchFieldDataType})`,\n \"Edm.ComplexType\" | \"Edm.Byte\" | \"Edm.Half\" | \"Edm.Int16\" | \"Edm.SByte\" | \"Edm.Single\"\n>;\nexport type SearchIndexerDataSourceType = `${KnownSearchIndexerDataSourceType}`;\nexport type SentimentSkillLanguage = `${KnownSentimentSkillLanguage}`;\nexport type SplitSkillLanguage = `${KnownSplitSkillLanguage}`;\nexport type TextSplitMode = `${KnownTextSplitMode}`;\nexport type TextTranslationSkillLanguage = `${KnownTextTranslationSkillLanguage}`;\nexport type TokenFilterNames = `${KnownTokenFilterName}`;\nexport type TokenizerNames = `${KnownLexicalTokenizerName}`;\nexport type VectorSearchAlgorithmKind = `${KnownVectorSearchAlgorithmKind}`;\nexport type VectorSearchAlgorithmMetric = `${KnownVectorSearchAlgorithmMetric}`;\nexport type VisualFeature = `${KnownVisualFeature}`;\n\n// END manually modified generated interfaces\n"]}
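The vector search types embedded above (VectorSearch, HnswAlgorithmConfiguration, AzureOpenAIVectorizer, VectorSearchProfile) compose into a single index-level configuration. The following is a minimal TypeScript sketch of that composition, not a definitive recipe: every configuration, profile, endpoint, and deployment name is a hypothetical placeholder, "cosine" and "text-embedding-3-large" are assumed members of the known metric and model-name unions, and the VectorSearchProfile field names are assumed from this SDK's convenience layer.

import type {
  AzureOpenAIVectorizer,
  HnswAlgorithmConfiguration,
  VectorSearch,
} from "@azure/search-documents";

// HNSW graph parameters: m controls connectivity; efConstruction and efSearch
// size the nearest-neighbor candidate lists at index time and query time.
const hnsw: HnswAlgorithmConfiguration = {
  kind: "hnsw",
  name: "hnsw-default", // hypothetical algorithm configuration name
  parameters: { m: 4, efConstruction: 400, efSearch: 500, metric: "cosine" },
};

// Query-time vectorizer that embeds search text via an Azure OpenAI deployment.
const vectorizer: AzureOpenAIVectorizer = {
  kind: "azureOpenAI",
  vectorizerName: "aoai-vectorizer", // hypothetical
  parameters: {
    resourceUrl: "https://contoso.openai.azure.com", // placeholder resource
    deploymentId: "embedding-deployment", // placeholder deployment
    modelName: "text-embedding-3-large", // assumed AzureOpenAIModelName value
  },
};

// A profile bundles one algorithm configuration with an optional vectorizer so
// that vector fields can reference the combination by profile name.
const vectorSearch: VectorSearch = {
  algorithms: [hnsw],
  vectorizers: [vectorizer],
  profiles: [
    {
      name: "default-profile", // hypothetical
      algorithmConfigurationName: "hnsw-default",
      vectorizerName: "aoai-vectorizer", // field name assumed for the profile-to-vectorizer link
    },
  ],
};

A vector field in the index definition would then opt into this combination through its profile reference, so the profile name is the only string the field needs to know.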
1 | +
{"version":3,"file":"serviceModels.js","sourceRoot":"","sources":["../../src/serviceModels.ts"],"names":[],"mappings":";AAAA,uCAAuC;AACvC,kCAAkC;;;AA+9BlC,wCAEC;AAFD,SAAgB,cAAc,CAAC,KAAkB;IAC/C,OAAO,KAAK,CAAC,IAAI,KAAK,iBAAiB,IAAI,KAAK,CAAC,IAAI,KAAK,6BAA6B,CAAC;AAC1F,CAAC;AAqVD;;;GAGG;AACH,IAAY,mBAmEX;AAnED,WAAY,mBAAmB;IAC7B;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;;OAGG;IACH,6CAAsB,CAAA;IACtB;;;OAGG;IACH,wCAAiB,CAAA;IACjB;;;OAGG;IACH,8CAAuB,CAAA;IACvB;;OAEG;IACH,wDAAwD;IACxD,kFAA2D,CAAA;IAC3D;;OAEG;IACH,wDAAwD;IACxD,mGAA4E,CAAA;IAC5E;;;OAGG;IACH,sCAAe,CAAA;IACf;;;OAGG;IACH,0DAAmC,CAAA;IACnC;;;OAGG;IACH,0CAAmB,CAAA;IACnB;;;;OAIG;IACH,+CAAwB,CAAA;IACxB;;;OAGG;IACH,oDAA6B,CAAA;IAC7B;;;OAGG;IACH,gDAAyB,CAAA;AAC3B,CAAC,EAnEW,mBAAmB,mCAAnB,mBAAmB,QAmE9B;AAED;;;GAGG;AACH,IAAY,qBAiLX;AAjLD,WAAY,qBAAqB;IAC/B;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,kDAAyB,CAAA;IACzB;;;;;OAKG;IACH,sDAA6B,CAAA;IAC7B;;;OAGG;IACH,iDAAwB,CAAA;IACxB;;;;OAIG;IACH,+CAAsB,CAAA;IACtB;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,oDAA2B,CAAA;IAC3B;;;;OAIG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;;OAIG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,mEAA0C,CAAA;IAC1C;;;OAGG;IACH,yDAAgC,CAAA;IAChC;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,wCAAe,CAAA;IACf;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,2CAAkB,CAAA;IAClB;;;OAGG;IACH,uEAA8C,CAAA;IAC9C;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,mDAA0B,CAAA;IAC1B;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,iFAAwD,CAAA;IACxD;;;;OAIG;IACH,kFAAyD,CAAA;IACzD;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,qEAA4C,CAAA;IAC5C;;;OAGG;IACH,4CAAmB,CAAA;IACnB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;;OAGG;IACH,sCAAa,CAAA;IACb;;;OAGG;IACH,8CAAqB,CAAA;IACrB;;;OAGG;IACH,0CAAiB,CAAA;IACjB;;;OAGG;IACH,gDAAuB,CAAA;IACvB;;OAEG;IACH,yDAAgC,CAAA;AAClC,CAAC,EAjLW,qBAAqB,qCAArB,qBAAqB,QAiLhC;AAED;;;GAGG;AACH,IAAY,oBAMX;AAND,WAAY,oBAAoB;IAC9B;;;OAGG;IACH,gDAAwB,CAAA;AAC1B,CAAC,EANW,oBAAoB,oCAApB,oBAAoB,QAM/B;AAED;;;;GAIG;AACH,IAAY,kBAoXX;AApXD,WAAY,kBAAkB;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,2DAAqC,CAAA;IACrC;;OAEG;IACH,qDAA+B,CAAA;IAC/B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,k
DAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG;IACH,uDAAiC,CAAA;IACjC;;OAEG;IACH,iDAA2B,CAAA;IAC3B;;OAEG,CAAC,kDAA4B,CAAA;IAChC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,mEAA6C,CAAA;IAC7C;;OAEG;IACH,6DAAuC,CAAA;IACvC;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,4CAAsB,CAAA;IACtB;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,kDAA4B,CAAA;IAC5B;;OAEG;IACH,wDAAkC,CAAA;IAClC;;OAEG;IACH,gFAA0D,CAAA;IAC1D;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,yCAAmB,CAAA;IACnB;;OAEG;IACH,uCAAiB,CAAA;IACjB;;OAEG;IACH,mCAAa,CAAA;IACb;;OAEG;IACH,+CAAyB,CAAA;AAC3B,CAAC,EApXW,kBAAkB,kCAAlB,kBAAkB,QAoX7B;AAggBD,6CAA6C","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport { OperationOptions } from \"@azure/core-client\";\nimport { PagedAsyncIterableIterator } from \"@azure/core-paging\";\nimport {\n AsciiFoldingTokenFilter,\n AzureOpenAIModelName,\n BaseLexicalNormalizer,\n BM25Similarity,\n BinaryQuantizationCompression,\n CharFilterName,\n CjkBigramTokenFilter,\n ClassicSimilarity,\n ClassicTokenizer,\n CognitiveServicesAccountKey,\n CommonGramTokenFilter,\n ConditionalSkill,\n CorsOptions,\n CustomEntity,\n CustomLexicalNormalizer,\n DefaultCognitiveServicesAccount,\n DictionaryDecompounderTokenFilter,\n DistanceScoringFunction,\n DocumentExtractionSkill,\n DocumentIntelligenceLayoutSkill,\n EdgeNGramTokenFilterSide,\n EdgeNGramTokenizer,\n ElisionTokenFilter,\n EntityLinkingSkill,\n EntityRecognitionSkillV3,\n FieldMapping,\n FreshnessScoringFunction,\n HighWaterMarkChangeDetectionPolicy,\n IndexProjectionMode,\n IndexingSchedule,\n KeepTokenFilter,\n KeywordMarkerTokenFilter,\n KnownBlobIndexerDataToExtract,\n KnownBlobIndexerImageAction,\n KnownBlobIndexerPDFTextRotationAlgorithm,\n KnownBlobIndexerParsingMode,\n KnownCharFilterName,\n KnownCustomEntityLookupSkillLanguage,\n KnownEntityCategory,\n KnownEntityRecognitionSkillLanguage,\n KnownImageAnalysisSkillLanguage,\n KnownImageDetail,\n KnownIndexerExecutionEnvironment,\n KnownKeyPhraseExtractionSkillLanguage,\n KnownLexicalAnalyzerName,\n KnownLexicalTokenizerName,\n KnownOcrSkillLanguage,\n KnownPIIDetectionSkillMaskingMode,\n KnownRegexFlags,\n KnownSearchFieldDataType,\n KnownSearchIndexerDataSourceType,\n KnownSentimentSkillLanguage,\n KnownSplitSkillLanguage,\n KnownTextSplitMode,\n KnownTextTranslationSkillLanguage,\n KnownTokenFilterName,\n KnownVectorSearchAlgorithmKind,\n KnownVectorSearchAlgorithmMetric,\n KnownVisualFeature,\n LanguageDetectionSkill,\n LengthTokenFilter,\n LexicalAnalyzerName,\n LexicalNormalizerName,\n LexicalTokenizerName,\n LimitTokenFilter,\n LuceneStandardAnalyzer,\n MagnitudeScoringFunction,\n MappingCharFilter,\n MergeSkill,\n MicrosoftLanguageStemmingTokenizer,\n MicrosoftLanguageTokenizer,\n NGramTokenizer,\n PathHierarchyTokenizerV2 as PathHierarchyTokenizer,\n PatternCaptureTokenFilter,\n PatternReplaceCharFilter,\n 
PatternReplaceTokenFilter,\n PhoneticTokenFilter,\n ScalarQuantizationCompression,\n ScoringFunctionAggregation,\n SearchIndexerDataContainer,\n SearchIndexerDataNoneIdentity,\n SearchIndexerDataUserAssignedIdentity,\n SearchIndexerIndexProjectionSelector,\n SearchIndexerKnowledgeStoreProjection,\n SearchIndexerSkill as BaseSearchIndexerSkill,\n SemanticSearch,\n SentimentSkillV3,\n ServiceCounters,\n ServiceLimits,\n ShaperSkill,\n ShingleTokenFilter,\n SnowballTokenFilter,\n SoftDeleteColumnDeletionDetectionPolicy,\n SqlIntegratedChangeTrackingPolicy,\n StemmerOverrideTokenFilter,\n StemmerTokenFilter,\n StopAnalyzer,\n StopwordsTokenFilter,\n Suggester as SearchSuggester,\n SynonymTokenFilter,\n TagScoringFunction,\n TextWeights,\n TokenFilterName,\n TruncateTokenFilter,\n UaxUrlEmailTokenizer,\n UniqueTokenFilter,\n VectorEncodingFormat,\n VectorSearchProfile,\n VectorSearchVectorizerKind,\n WordDelimiterTokenFilter,\n} from \"./generated/service/models/index.js\";\n\n/**\n * Options for a list skillsets operation.\n */\nexport type ListSkillsetsOptions = OperationOptions;\n\n/**\n * Options for a list synonymMaps operation.\n */\nexport type ListSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for a list indexes operation.\n */\nexport type ListIndexesOptions = OperationOptions;\n\n/**\n * Options for a list indexers operation.\n */\nexport type ListIndexersOptions = OperationOptions;\n\n/**\n * Options for a list data sources operation.\n */\nexport type ListDataSourceConnectionsOptions = OperationOptions;\n\n/**\n * Options for get index operation.\n */\nexport type GetIndexOptions = OperationOptions;\n\n/**\n * Options for get skillset operation.\n */\nexport type GetSkillSetOptions = OperationOptions;\n\n/**\n * Options for get synonymmaps operation.\n */\nexport type GetSynonymMapsOptions = OperationOptions;\n\n/**\n * Options for get indexer operation.\n */\nexport type GetIndexerOptions = OperationOptions;\n\n/**\n * Options for get datasource operation.\n */\nexport type GetDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for get index statistics operation.\n */\nexport type GetIndexStatisticsOptions = OperationOptions;\n\n/**\n * Statistics for a given index. Statistics are collected periodically and are not guaranteed to\n * always be up-to-date.\n */\nexport interface SearchIndexStatistics {\n /**\n * The number of documents in the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * **NOTE: This property will not be serialized. It can only be populated by the server.**\n */\n readonly storageSize: number;\n /**\n * The amount of memory in bytes consumed by vectors in the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly vectorIndexSize: number;\n}\n\n/**\n * Response from a get service statistics request. 
If successful, it includes service level\n * counters and limits.\n */\nexport interface SearchServiceStatistics {\n /**\n * Service level resource counters.\n */\n counters: ServiceCounters;\n /**\n * Service level general limits.\n */\n limits: ServiceLimits;\n}\n\n/**\n * Options for get service statistics operation.\n */\nexport type GetServiceStatisticsOptions = OperationOptions;\n\n/**\n * Options for get indexer status operation.\n */\nexport type GetIndexerStatusOptions = OperationOptions;\n\n/**\n * Options for reset indexer operation.\n */\nexport type ResetIndexerOptions = OperationOptions;\n\n/**\n * Options for run indexer operation.\n */\nexport type RunIndexerOptions = OperationOptions;\n\n/**\n * Options for create index operation.\n */\nexport type CreateIndexOptions = OperationOptions;\n\n/**\n * Options for create skillset operation.\n */\nexport type CreateSkillsetOptions = OperationOptions;\n\n/**\n * Options for create alias operation.\n */\nexport type CreateAliasOptions = OperationOptions;\n\n/**\n * Options for create or update alias operation.\n */\nexport interface CreateOrUpdateAliasOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete alias operation.\n */\nexport interface DeleteAliasOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for get alias operation.\n */\nexport type GetAliasOptions = OperationOptions;\n\n/**\n * Options for list aliases operation.\n */\nexport type ListAliasesOptions = OperationOptions;\n\n/**\n * Options for create synonymmap operation.\n */\nexport type CreateSynonymMapOptions = OperationOptions;\n\n/**\n * Options for create indexer operation.\n */\nexport type CreateIndexerOptions = OperationOptions;\n\n/**\n * Options for create datasource operation.\n */\nexport type CreateDataSourceConnectionOptions = OperationOptions;\n\n/**\n * Options for create/update index operation.\n */\nexport interface CreateOrUpdateIndexOptions extends OperationOptions {\n /**\n * Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by\n * taking the index offline for at least a few seconds. This temporarily causes indexing and\n * query requests to fail. Performance and write availability of the index can be impaired for\n * several minutes after the index is updated, or longer for very large indexes.\n */\n allowIndexDowntime?: boolean;\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for reset docs operation.\n */\nexport interface ResetDocumentsOptions extends OperationOptions {\n /** document keys to be reset */\n documentKeys?: string[];\n /** datasource document identifiers to be reset */\n datasourceDocumentIds?: string[];\n /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */\n overwrite?: boolean;\n}\n\n/**\n * Options for reset skills operation.\n */\nexport interface ResetSkillsOptions extends OperationOptions {\n /** the names of skills to be reset. 
*/\n skillNames?: string[];\n}\n\n/**\n * Options for create/update skillset operation.\n */\nexport interface CreateOrUpdateSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update synonymmap operation.\n */\nexport interface CreateOrUpdateSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update indexer operation.\n */\nexport interface CreateorUpdateIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for create/update datasource operation.\n */\nexport interface CreateorUpdateDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, Resource will be updated only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete index operation.\n */\nexport interface DeleteIndexOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete skillset operation.\n */\nexport interface DeleteSkillsetOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete synonymmap operation.\n */\nexport interface DeleteSynonymMapOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete indexer operation.\n */\nexport interface DeleteIndexerOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Options for delete datasource operation.\n */\nexport interface DeleteDataSourceConnectionOptions extends OperationOptions {\n /**\n * If set to true, Resource will be deleted only if the etag matches.\n */\n onlyIfUnchanged?: boolean;\n}\n\n/**\n * Specifies some text and analysis components used to break that text into tokens.\n */\nexport interface AnalyzeRequest {\n /**\n * The text to break into tokens.\n */\n text: string;\n /**\n * The name of the analyzer to use to break the given text. If this parameter is not specified,\n * you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. {@link KnownAnalyzerNames} is an enum containing built-in analyzer names.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n analyzerName?: LexicalAnalyzerName;\n /**\n * The name of the tokenizer to use to break the given text. If this parameter is not specified,\n * you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually\n * exclusive. {@link KnownTokenizerNames} is an enum containing built-in tokenizer names.\n * NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.\n */\n tokenizerName?: LexicalTokenizerName;\n /**\n * An optional list of token filters to use when breaking the given text. This parameter can only\n * be set when using the tokenizer parameter.\n */\n tokenFilters?: TokenFilterName[];\n /**\n * An optional list of character filters to use when breaking the given text. 
This parameter can\n * only be set when using the tokenizer parameter.\n */\n charFilters?: CharFilterName[];\n}\n\n/**\n * Options for analyze text operation.\n */\nexport type AnalyzeTextOptions = OperationOptions & AnalyzeRequest;\n\n// BEGIN manually modified generated interfaces\n//\n// This section is for places where we have to manually fix issues\n// with interfaces from the generated code.\n// One issue is that unions of discriminated types are generated with\n// their abstract base class as a member.\n\n/**\n * Flexibly separates text into terms via a regular expression pattern. This analyzer is\n * implemented using Apache Lucene.\n */\nexport interface PatternAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * A value indicating whether terms should be lower-cased. Default is true. Default value: true.\n */\n lowerCaseTerms?: boolean;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * A list of stopwords.\n */\n stopwords?: string[];\n}\n\n/**\n * Allows you to take control over the process of converting text into indexable/searchable tokens.\n * It's a user-defined configuration consisting of a single predefined tokenizer and one or more\n * filters. The tokenizer is responsible for breaking text into tokens, and the filters for\n * modifying tokens emitted by the tokenizer.\n */\nexport interface CustomAnalyzer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.CustomAnalyzer\";\n /**\n * The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores,\n * can only start and end with alphanumeric characters, and is limited to 128 characters.\n */\n name: string;\n /**\n * The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as\n * breaking a sentence into words. {@link KnownTokenizerNames} is an enum containing built-in tokenizer names.\n */\n tokenizerName: LexicalTokenizerName;\n /**\n * A list of token filters used to filter out or modify the tokens generated by a tokenizer. For\n * example, you can specify a lowercase filter that converts all characters to lowercase. The\n * filters are run in the order in which they are listed.\n */\n tokenFilters?: TokenFilterName[];\n /**\n * A list of character filters used to prepare input text before it is processed by the\n * tokenizer. For instance, they can replace certain characters or symbols. 
The filters are run\n * in the order in which they are listed.\n */\n charFilters?: CharFilterName[];\n}\n\n/**\n * Contains the possible cases for Analyzer.\n */\nexport type LexicalAnalyzer =\n | CustomAnalyzer\n | PatternAnalyzer\n | LuceneStandardAnalyzer\n | StopAnalyzer;\n\n/**\n * Contains the possible cases for LexicalNormalizer.\n */\nexport type LexicalNormalizer = BaseLexicalNormalizer | CustomLexicalNormalizer;\n\n/**\n * A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call\n * your custom code.\n */\nexport interface WebApiSkill extends BaseSearchIndexerSkill {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n odatatype: \"#Microsoft.Skills.Custom.WebApiSkill\";\n /**\n * The url for the Web API.\n */\n uri: string;\n /**\n * The headers required to make the http request.\n */\n httpHeaders?: { [propertyName: string]: string };\n /**\n * The method for the http request.\n */\n httpMethod?: string;\n /**\n * The desired timeout for the request. Default is 30 seconds.\n */\n timeout?: string;\n /**\n * The desired batch size which indicates number of documents.\n */\n batchSize?: number;\n /**\n * If set, the number of parallel calls that can be made to the Web API.\n */\n degreeOfParallelism?: number;\n /**\n * Applies to custom skills that connect to external code in an Azure function or some other\n * application that provides the transformations. This value should be the application ID\n * created for the function or app when it was registered with Azure Active Directory. When\n * specified, the custom skill connects to the function or app using a managed ID (either system\n * or user-assigned) of the search service and the access token of the function or app, using\n * this value as the resource id for creating the scope of the access token.\n */\n authResourceId?: string;\n /**\n * The user-assigned managed identity used for outbound connections. If an authResourceId is\n * provided and it's not specified, the system-assigned managed identity is used. On updates to\n * the indexer, if the identity is unspecified, the value remains unchanged. If undefined, the\n * value of this property is cleared.\n */\n authIdentity?: SearchIndexerDataIdentity;\n}\n\n/**\n * Contains the possible cases for Skill.\n */\nexport type SearchIndexerSkill =\n | AzureOpenAIEmbeddingSkill\n | ConditionalSkill\n | CustomEntityLookupSkill\n | DocumentExtractionSkill\n | DocumentIntelligenceLayoutSkill\n | EntityLinkingSkill\n | EntityRecognitionSkill\n | EntityRecognitionSkillV3\n | ImageAnalysisSkill\n | KeyPhraseExtractionSkill\n | LanguageDetectionSkill\n | MergeSkill\n | OcrSkill\n | PIIDetectionSkill\n | SentimentSkill\n | SentimentSkillV3\n | ShaperSkill\n | SplitSkill\n | TextTranslationSkill\n | WebApiSkill;\n\n/**\n * Contains the possible cases for CognitiveServicesAccount.\n */\nexport type CognitiveServicesAccount =\n | DefaultCognitiveServicesAccount\n | CognitiveServicesAccountKey;\n/**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is\n * implemented using Apache Lucene.\n */\nexport interface PatternTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype: \"#Microsoft.Azure.Search.PatternTokenizer\";\n /**\n * The name of the tokenizer. 
It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * A regular expression pattern to match token separators. Default is an expression that matches\n * one or more whitespace characters. Default value: `\\W+`.\n */\n pattern?: string;\n /**\n * Regular expression flags. Possible values include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS',\n * 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'\n */\n flags?: RegexFlags[];\n /**\n * The zero-based ordinal of the matching group in the regular expression pattern to extract into\n * tokens. Use -1 if you want to use the entire pattern to split the input into tokens,\n * irrespective of matching groups. Default is -1. Default value: -1.\n */\n group?: number;\n}\n/**\n * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using\n * Apache Lucene.\n */\nexport interface LuceneStandardTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.StandardTokenizerV2\"\n | \"#Microsoft.Azure.Search.StandardTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 255. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. Default value: 255.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * This token filter is implemented using Apache Lucene.\n */\nexport interface EdgeNGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. Default value: 2.\n */\n maxGram?: number;\n /**\n * Specifies which side of the input the n-gram should be generated from. Default is \"front\".\n * Possible values include: 'Front', 'Back'\n */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/**\n * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.\n */\nexport interface KeywordTokenizer {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.KeywordTokenizerV2\"\n | \"#Microsoft.Azure.Search.KeywordTokenizer\";\n /**\n * The name of the tokenizer. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The maximum token length. Default is 256. Tokens longer than the maximum length are split. The\n * maximum token length that can be used is 300 characters. 
Default value: 256.\n */\n maxTokenLength?: number;\n}\n\n/**\n * Contains the possible cases for Tokenizer.\n */\nexport type LexicalTokenizer =\n | ClassicTokenizer\n | EdgeNGramTokenizer\n | KeywordTokenizer\n | MicrosoftLanguageTokenizer\n | MicrosoftLanguageStemmingTokenizer\n | NGramTokenizer\n | PathHierarchyTokenizer\n | PatternTokenizer\n | LuceneStandardTokenizer\n | UaxUrlEmailTokenizer;\n\n/**\n * Definition of additional projections to azure blob, table, or files, of enriched data.\n */\nexport interface SearchIndexerKnowledgeStore {\n /**\n * The connection string to the storage account projections will be stored in.\n */\n storageConnectionString: string;\n /**\n * A list of additional projections to perform during indexing.\n */\n projections: SearchIndexerKnowledgeStoreProjection[];\n /**\n * The user-assigned managed identity used for connections to Azure Storage when writing\n * knowledge store projections. If the connection string indicates an identity (ResourceId) and\n * it's not specified, the system-assigned managed identity is used. On updates to the indexer,\n * if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of\n * this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * Contains the possible cases for Similarity.\n */\nexport type SimilarityAlgorithm = ClassicSimilarity | BM25Similarity;\n\n/**\n * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.\n */\nexport interface NGramTokenFilter {\n /**\n * Polymorphic Discriminator\n */\n odatatype:\n | \"#Microsoft.Azure.Search.NGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.NGramTokenFilter\";\n /**\n * The name of the token filter. It must only contain letters, digits, spaces, dashes or\n * underscores, can only start and end with alphanumeric characters, and is limited to 128\n * characters.\n */\n name: string;\n /**\n * The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of\n * maxGram. Default value: 1.\n */\n minGram?: number;\n /**\n * The maximum n-gram length. Default is 2. Maximum is 300. 
Default value: 2.\n */\n maxGram?: number;\n}\n\n/**\n * Contains the possible cases for TokenFilter.\n */\nexport type TokenFilter =\n | AsciiFoldingTokenFilter\n | CjkBigramTokenFilter\n | CommonGramTokenFilter\n | DictionaryDecompounderTokenFilter\n | EdgeNGramTokenFilter\n | ElisionTokenFilter\n | KeepTokenFilter\n | KeywordMarkerTokenFilter\n | LengthTokenFilter\n | LimitTokenFilter\n | NGramTokenFilter\n | PatternCaptureTokenFilter\n | PatternReplaceTokenFilter\n | PhoneticTokenFilter\n | ShingleTokenFilter\n | SnowballTokenFilter\n | StemmerTokenFilter\n | StemmerOverrideTokenFilter\n | StopwordsTokenFilter\n | SynonymTokenFilter\n | TruncateTokenFilter\n | UniqueTokenFilter\n | WordDelimiterTokenFilter;\n\n/**\n * Contains the possible cases for CharFilter.\n */\nexport type CharFilter = MappingCharFilter | PatternReplaceCharFilter;\n\n/**\n * Contains the possible cases for ScoringFunction.\n */\nexport type ScoringFunction =\n | DistanceScoringFunction\n | FreshnessScoringFunction\n | MagnitudeScoringFunction\n | TagScoringFunction;\n\n/**\n * Defines values for ComplexDataType.\n * Possible values include: 'Edm.ComplexType', 'Collection(Edm.ComplexType)'\n * @readonly\n */\nexport type ComplexDataType = \"Edm.ComplexType\" | \"Collection(Edm.ComplexType)\";\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport type SearchField = SimpleField | ComplexField;\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface SimpleField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field.\n */\n type: SearchFieldDataType;\n /**\n * A value indicating whether the field uniquely identifies documents in the index. Exactly one\n * top-level field in each index must be chosen as the key field and it must be of type\n * Edm.String. Key fields can be used to look up documents directly and update or delete specific\n * documents. Default is false.\n */\n key?: boolean;\n /**\n * A value indicating whether the field can be returned in a search result. You can disable this\n * option if you want to use a field (for example, margin) as a filter, sorting, or scoring\n * mechanism but do not want the field to be visible to the end user. This property must be false\n * for key fields. This property can be changed on existing fields. Enabling this property does\n * not cause any increase in index storage requirements. Default is true for vector fields, false\n * otherwise.\n */\n hidden?: boolean;\n /**\n * An immutable value indicating whether the field will be persisted separately on disk to be\n * returned in a search result. You can disable this option if you don't plan to return the field\n * contents in a search response to save on storage overhead. This can only be set during index\n * creation and only for vector fields. This property cannot be changed for existing fields or set\n * as false for new fields. If this property is set as false, the property 'hidden' must be set to\n * 'true'. This property must be true or unset for key fields, for new fields, and for non-vector\n * fields. Disabling this property will reduce index storage requirements. The default is true for vector fields.\n */\n stored?: boolean;\n /**\n * A value indicating whether the field is full-text searchable. 
This means it will undergo\n * analysis such as word-breaking during indexing. If you set a searchable field to a value like\n * \"sunny day\", internally it will be split into the individual tokens \"sunny\" and \"day\". This\n * enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String)\n * are searchable by default. This property must be false for simple fields of other non-string\n * data types. Note: searchable fields consume extra space\n * in your index to accommodate additional tokenized versions of the field value for full-text\n * searches. If you want to save space in your index and you don't need a field to be included in\n * searches, set searchable to false. Default is false.\n */\n searchable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $filter queries. filterable\n * differs from searchable in how strings are handled. Fields of type Edm.String or\n * Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for\n * exact matches only. For example, if you set such a field f to \"sunny day\", $filter=f eq 'sunny'\n * will find no matches, but $filter=f eq 'sunny day' will. Default is false.\n */\n filterable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in $orderby expressions. By\n * default, the search engine sorts results by score, but in many experiences users will want to\n * sort by fields in the documents. A simple field can be sortable only if it is single-valued (it\n * has a single value in the scope of the parent document). Simple collection fields cannot be\n * sortable, since they are multi-valued. Simple sub-fields of complex collections are also\n * multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent\n * field, or an ancestor field, that's the complex collection. The default is false.\n *\n */\n sortable?: boolean;\n /**\n * A value indicating whether to enable the field to be referenced in facet queries. Typically\n * used in a presentation of search results that includes hit count by category (for example,\n * search for digital cameras and see hits by brand, by megapixels, by price, and so on). Fields\n * of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is\n * false.\n */\n facetable?: boolean;\n /**\n * The name of the analyzer to use for the field. This option can be used only with searchable\n * fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the\n * analyzer is chosen, it cannot be changed for the field.\n */\n analyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at search time for the field. This option can be used only with\n * searchable fields. It must be set together with `indexAnalyzerName` and it cannot be set\n * together with the `analyzerName` option. This property cannot be set to the name of a language\n * analyzer; use the `analyzerName` property instead if you need a language analyzer. This\n * analyzer can be updated on an existing field.\n */\n searchAnalyzerName?: LexicalAnalyzerName;\n /**\n * The name of the analyzer used at indexing time for the field. This option can be used only\n * with searchable fields. It must be set together with searchAnalyzer and it cannot be set\n * together with the analyzer option. This property cannot be set to the name of a language\n * analyzer; use the analyzer property instead if you need a language analyzer. 
Once the analyzer\n * is chosen, it cannot be changed for the field.\n */\n indexAnalyzerName?: LexicalAnalyzerName;\n /**\n * A list of the names of synonym maps to associate with this field. This option can be used only\n * with searchable fields. Currently only one synonym map per field is supported. Assigning a\n * synonym map to a field ensures that query terms targeting that field are expanded at query-time\n * using the rules in the synonym map. This attribute can be changed on existing fields.\n */\n synonymMapNames?: string[];\n /**\n * The dimensionality of the vector field.\n */\n vectorSearchDimensions?: number;\n /**\n * The name of the vector search profile that specifies the algorithm and vectorizer to use when\n * searching the vector field.\n */\n vectorSearchProfileName?: string;\n /**\n * The encoding format to interpret the field contents.\n */\n vectorEncodingFormat?: VectorEncodingFormat;\n /** The name of the normalizer to use for the field. This option can be used only with fields with\n * filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed\n * for the field. Must be null for complex fields.\n */\n normalizerName?: LexicalNormalizerName;\n}\n\nexport function isComplexField(field: SearchField): field is ComplexField {\n return field.type === \"Edm.ComplexType\" || field.type === \"Collection(Edm.ComplexType)\";\n}\n\n/**\n * Represents a field in an index definition, which describes the name, data type, and search\n * behavior of a field.\n */\nexport interface ComplexField {\n /**\n * The name of the field, which must be unique within the fields collection of the index or\n * parent field.\n */\n name: string;\n /**\n * The data type of the field.\n * Possible values include: 'Edm.ComplexType','Collection(Edm.ComplexType)'\n */\n type: ComplexDataType;\n /**\n * A list of sub-fields.\n */\n fields?: SearchField[];\n}\n\n/**\n * Represents a synonym map definition.\n */\nexport interface SynonymMap {\n /**\n * The name of the synonym map.\n */\n name: string;\n /**\n * An array of synonym rules in the specified synonym map format.\n */\n synonyms: string[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The ETag of the synonym map.\n */\n etag?: string;\n}\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. Use .byPage() to make one request to the server\n * per iteration.\n */\nexport type IndexIterator = PagedAsyncIterableIterator<SearchIndex, SearchIndex[], {}>;\n\n/**\n * An iterator for listing the indexes that exist in the Search service. Will make requests\n * as needed during iteration. 
Use .byPage() to make one request to the server\n * per iteration.\n */\nexport type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;\n\n/**\n * Represents a search index definition, which describes the fields and search behavior of an\n * index.\n */\nexport interface SearchIndex {\n /**\n * The name of the index.\n */\n name: string;\n /**\n * The description of the index.\n */\n description?: string;\n /**\n * The fields of the index.\n */\n fields: SearchField[];\n /**\n * The scoring profiles for the index.\n */\n scoringProfiles?: ScoringProfile[];\n /**\n * The name of the scoring profile to use if none is specified in the query. If this property is\n * not set and no scoring profile is specified in the query, then default scoring (tf-idf) will\n * be used.\n */\n defaultScoringProfile?: string;\n /**\n * Options to control Cross-Origin Resource Sharing (CORS) for the index.\n */\n corsOptions?: CorsOptions;\n /**\n * The suggesters for the index.\n */\n suggesters?: SearchSuggester[];\n /**\n * The analyzers for the index.\n */\n analyzers?: LexicalAnalyzer[];\n /**\n * The tokenizers for the index.\n */\n tokenizers?: LexicalTokenizer[];\n /**\n * The normalizers for the index.\n */\n normalizers?: LexicalNormalizer[];\n /**\n * The token filters for the index.\n */\n tokenFilters?: TokenFilter[];\n /**\n * The character filters for the index.\n */\n charFilters?: CharFilter[];\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your data when you want full assurance\n * that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you\n * have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore\n * attempts to set this property to null. You can change this property as needed if you want to\n * rotate your encryption key; Your data will be unaffected. Encryption with customer-managed\n * keys is not available for free search services, and is only available for paid services\n * created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n /**\n * The type of similarity algorithm to be used when scoring and ranking the documents matching a\n * search query. The similarity algorithm can only be defined at index creation time and cannot\n * be modified on existing indexes. If null, the ClassicSimilarity algorithm is used.\n */\n similarity?: SimilarityAlgorithm;\n /**\n * Defines parameters for a search index that influence semantic capabilities.\n */\n semanticSearch?: SemanticSearch;\n /**\n * Contains configuration options related to vector search.\n */\n vectorSearch?: VectorSearch;\n /**\n * The ETag of the index.\n */\n etag?: string;\n}\n\nexport interface SearchIndexerCache {\n /**\n * The connection string to the storage account where the cache data will be persisted.\n */\n storageConnectionString?: string;\n /**\n * Specifies whether incremental reprocessing is enabled.\n */\n enableReprocessing?: boolean;\n /** The user-assigned managed identity used for connections to the enrichment cache. If the\n * connection string indicates an identity (ResourceId) and it's not specified, the\n * system-assigned managed identity is used. On updates to the indexer, if the identity is\n * unspecified, the value remains unchanged. 
If set to \"none\", the value of this property is\n * cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * Represents an indexer.\n */\nexport interface SearchIndexer {\n /**\n * The name of the indexer.\n */\n name: string;\n /**\n * The description of the indexer.\n */\n description?: string;\n /**\n * The name of the datasource from which this indexer reads data.\n */\n dataSourceName: string;\n /**\n * The name of the skillset executing with this indexer.\n */\n skillsetName?: string;\n /**\n * The name of the index to which this indexer writes data.\n */\n targetIndexName: string;\n /**\n * The schedule for this indexer.\n */\n schedule?: IndexingSchedule;\n /**\n * Parameters for indexer execution.\n */\n parameters?: IndexingParameters;\n /**\n * Defines mappings between fields in the data source and corresponding target fields in the\n * index.\n */\n fieldMappings?: FieldMapping[];\n /**\n * Output field mappings are applied after enrichment and immediately before indexing.\n */\n outputFieldMappings?: FieldMapping[];\n /**\n * A value indicating whether the indexer is disabled. Default is false. Default value: false.\n */\n isDisabled?: boolean;\n /**\n * The ETag of the indexer.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your indexer definition (as well as\n * indexer execution status) when you want full assurance that no one, not even Microsoft, can\n * decrypt them in Azure Cognitive Search. Once you have encrypted your indexer definition, it\n * will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property\n * to null. You can change this property as needed if you want to rotate your encryption key;\n * Your indexer definition (and indexer execution status) will be unaffected. Encryption with\n * customer-managed keys is not available for free search services, and is only available for\n * paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/**\n * A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be\n * used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym\n * maps.\n */\nexport interface SearchResourceEncryptionKey {\n /**\n * The name of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyName: string;\n /**\n * The version of your Azure Key Vault key to be used to encrypt your data at rest.\n */\n keyVersion: string;\n /**\n * The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be\n * used to encrypt your data at rest. An example URI might be\n * https://my-keyvault-name.vault.azure.net.\n */\n vaultUrl: string;\n /**\n * An AAD Application ID that was granted the required access permissions to the Azure Key Vault\n * that is to be used when encrypting your data at rest. The Application ID should not be\n * confused with the Object ID for your AAD Application.\n */\n applicationId?: string;\n /**\n * The authentication key of the specified AAD application.\n */\n applicationSecret?: string;\n /**\n * An explicit managed identity to use for this encryption key. If not specified and the access\n * credentials property is null, the system-assigned managed identity is used. On update to the\n * resource, if the explicit identity is unspecified, it remains unchanged. 
If \"none\" is specified,\n * the value of this property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n}\n\n/**\n * A list of skills.\n */\nexport interface SearchIndexerSkillset {\n /**\n * The name of the skillset.\n */\n name: string;\n /**\n * The description of the skillset.\n */\n description?: string;\n /**\n * A list of skills in the skillset.\n */\n skills: SearchIndexerSkill[];\n /**\n * Details about cognitive services to be used when running skills.\n */\n cognitiveServicesAccount?: CognitiveServicesAccount;\n /**\n * Definition of additional projections to azure blob, table, or files, of enriched data.\n */\n knowledgeStore?: SearchIndexerKnowledgeStore;\n /**\n * Definition of additional projections to secondary search index(es).\n */\n indexProjection?: SearchIndexerIndexProjection;\n /**\n * The ETag of the skillset.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your skillset definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your skillset definition in Azure\n * Cognitive Search. Once you have encrypted your skillset definition, it will always remain\n * encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can\n * change this property as needed if you want to rotate your encryption key; Your skillset\n * definition will be unaffected. Encryption with customer-managed keys is not available for free\n * search services, and is only available for paid services created on or after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/**\n * Defines parameters for a search index that influence scoring in search queries.\n */\nexport interface ScoringProfile {\n /**\n * The name of the scoring profile.\n */\n name: string;\n /**\n * Parameters that boost scoring based on text matches in certain index fields.\n */\n textWeights?: TextWeights;\n /**\n * The collection of functions that influence the scoring of documents.\n */\n functions?: ScoringFunction[];\n /**\n * A value indicating how the results of individual scoring functions should be combined.\n * Defaults to \"Sum\". Ignored if there are no scoring functions. Possible values include: 'sum',\n * 'average', 'minimum', 'maximum', 'firstMatching'\n */\n functionAggregation?: ScoringFunctionAggregation;\n}\n\n/**\n * Defines values for TokenizerName.\n * @readonly\n */\nexport enum KnownTokenizerNames {\n /**\n * Grammar-based tokenizer that is suitable for processing most European-language documents. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html\n */\n Classic = \"classic\",\n /**\n * Tokenizes the input from an edge into n-grams of the given size(s). See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.html\n */\n EdgeNGram = \"edgeNGram\",\n /**\n * Emits the entire input as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordTokenizer.html\n */\n Keyword = \"keyword_v2\",\n /**\n * Divides text at non-letters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LetterTokenizer.html\n */\n Letter = \"letter\",\n /**\n * Divides text at non-letters and converts them to lower case. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseTokenizer.html\n */\n Lowercase = \"lowercase\",\n /**\n * Divides text using language-specific rules.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageTokenizer = \"microsoft_language_tokenizer\",\n /**\n * Divides text using language-specific rules and reduces words to their base forms.\n */\n // eslint-disable-next-line @typescript-eslint/no-shadow\n MicrosoftLanguageStemmingTokenizer = \"microsoft_language_stemming_tokenizer\",\n /**\n * Tokenizes the input into n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html\n */\n NGram = \"nGram\",\n /**\n * Tokenizer for path-like hierarchies. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/path/PathHierarchyTokenizer.html\n */\n PathHierarchy = \"path_hierarchy_v2\",\n /**\n * Tokenizer that uses regex pattern matching to construct distinct tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html\n */\n Pattern = \"pattern\",\n /**\n * Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop\n * filter. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html\n */\n Standard = \"standard_v2\",\n /**\n * Tokenizes urls and emails as one token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html\n */\n UaxUrlEmail = \"uax_url_email\",\n /**\n * Divides text at whitespace. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html\n */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Defines values for TokenFilterName.\n * @readonly\n */\nexport enum KnownTokenFilterNames {\n /**\n * A token filter that applies the Arabic normalizer to normalize the orthography. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html\n */\n ArabicNormalization = \"arabic_normalization\",\n /**\n * Strips all characters after an apostrophe (including the apostrophe itself). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html\n */\n Apostrophe = \"apostrophe\",\n /**\n * Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127\n * ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such\n * equivalents exist. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n AsciiFolding = \"asciifolding\",\n /**\n * Forms bigrams of CJK terms that are generated from StandardTokenizer. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html\n */\n CjkBigram = \"cjk_bigram\",\n /**\n * Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic\n * Latin, and half-width Katakana variants into the equivalent Kana. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html\n */\n CjkWidth = \"cjk_width\",\n /**\n * Removes English possessives, and dots from acronyms. 
See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html\n */\n Classic = \"classic\",\n /**\n * Construct bigrams for frequently occurring terms while indexing. Single terms are still\n * indexed too, with bigrams overlaid. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html\n */\n CommonGram = \"common_grams\",\n /**\n * Generates n-grams of the given size(s) starting from the front or the back of an input token.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html\n */\n EdgeNGram = \"edgeNGram_v2\",\n /**\n * Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html\n */\n Elision = \"elision\",\n /**\n * Normalizes German characters according to the heuristics of the German2 snowball algorithm.\n * See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html\n */\n GermanNormalization = \"german_normalization\",\n /**\n * Normalizes text in Hindi to remove some differences in spelling variations. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html\n */\n HindiNormalization = \"hindi_normalization\",\n /**\n * Normalizes the Unicode representation of text in Indian languages. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html\n */\n IndicNormalization = \"indic_normalization\",\n /**\n * Emits each incoming token twice, once as keyword and once as non-keyword. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html\n */\n KeywordRepeat = \"keyword_repeat\",\n /**\n * A high-performance kstem filter for English. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html\n */\n KStem = \"kstem\",\n /**\n * Removes words that are too long or too short. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html\n */\n Length = \"length\",\n /**\n * Limits the number of tokens while indexing. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html\n */\n Limit = \"limit\",\n /**\n * Normalizes token text to lower case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm\n */\n Lowercase = \"lowercase\",\n /**\n * Generates n-grams of the given size(s). See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html\n */\n NGram = \"nGram_v2\",\n /**\n * Applies normalization for Persian. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html\n */\n PersianNormalization = \"persian_normalization\",\n /**\n * Create tokens for phonetic matches. See\n * https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html\n */\n Phonetic = \"phonetic\",\n /**\n * Uses the Porter stemming algorithm to transform the token stream. 
See\n * http://tartarus.org/~martin/PorterStemmer\n */\n PorterStem = \"porter_stem\",\n /**\n * Reverses the token string. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html\n */\n Reverse = \"reverse\",\n /**\n * Normalizes use of the interchangeable Scandinavian characters. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html\n */\n ScandinavianNormalization = \"scandinavian_normalization\",\n /**\n * Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use\n * of double vowels aa, ae, ao, oe and oo, leaving just the first one. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html\n */\n ScandinavianFoldingNormalization = \"scandinavian_folding\",\n /**\n * Creates combinations of tokens as a single token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html\n */\n Shingle = \"shingle\",\n /**\n * A filter that stems words using a Snowball-generated stemmer. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html\n */\n Snowball = \"snowball\",\n /**\n * Normalizes the Unicode representation of Sorani text. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html\n */\n SoraniNormalization = \"sorani_normalization\",\n /**\n * Language specific stemming filter. See\n * https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters\n */\n Stemmer = \"stemmer\",\n /**\n * Removes stop words from a token stream. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html\n */\n Stopwords = \"stopwords\",\n /**\n * Trims leading and trailing whitespace from tokens. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html\n */\n Trim = \"trim\",\n /**\n * Truncates the terms to a specific length. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html\n */\n Truncate = \"truncate\",\n /**\n * Filters out tokens with same text as the previous token. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html\n */\n Unique = \"unique\",\n /**\n * Normalizes token text to upper case. See\n * http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html\n */\n Uppercase = \"uppercase\",\n /**\n * Splits words into subwords and performs optional transformations on subword groups.\n */\n WordDelimiter = \"word_delimiter\",\n}\n\n/**\n * Defines values for CharFilterName.\n * @readonly\n */\nexport enum KnownCharFilterNames {\n /**\n * A character filter that attempts to strip out HTML constructs. 
See\n * https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html\n */\n HtmlStrip = \"html_strip\",\n}\n\n/**\n * Defines values for AnalyzerName.\n * See https://docs.microsoft.com/rest/api/searchservice/Language-support\n * @readonly\n */\nexport enum KnownAnalyzerNames {\n /**\n * Arabic\n */\n ArMicrosoft = \"ar.microsoft\",\n /**\n * Arabic\n */\n ArLucene = \"ar.lucene\",\n /**\n * Armenian\n */\n HyLucene = \"hy.lucene\",\n /**\n * Bangla\n */\n BnMicrosoft = \"bn.microsoft\",\n /**\n * Basque\n */\n EuLucene = \"eu.lucene\",\n /**\n * Bulgarian\n */\n BgMicrosoft = \"bg.microsoft\",\n /**\n * Bulgarian\n */\n BgLucene = \"bg.lucene\",\n /**\n * Catalan\n */\n CaMicrosoft = \"ca.microsoft\",\n /**\n * Catalan\n */\n CaLucene = \"ca.lucene\",\n /**\n * Chinese Simplified\n */\n ZhHansMicrosoft = \"zh-Hans.microsoft\",\n /**\n * Chinese Simplified\n */\n ZhHansLucene = \"zh-Hans.lucene\",\n /**\n * Chinese Traditional\n */\n ZhHantMicrosoft = \"zh-Hant.microsoft\",\n /**\n * Chinese Traditional\n */\n ZhHantLucene = \"zh-Hant.lucene\",\n /**\n * Croatian\n */\n HrMicrosoft = \"hr.microsoft\",\n /**\n * Czech\n */\n CsMicrosoft = \"cs.microsoft\",\n /**\n * Czech\n */\n CsLucene = \"cs.lucene\",\n /**\n * Danish\n */\n DaMicrosoft = \"da.microsoft\",\n /**\n * Danish\n */\n DaLucene = \"da.lucene\",\n /**\n * Dutch\n */\n NlMicrosoft = \"nl.microsoft\",\n /**\n * Dutch\n */\n NlLucene = \"nl.lucene\",\n /**\n * English\n */\n EnMicrosoft = \"en.microsoft\",\n /**\n * English\n */\n EnLucene = \"en.lucene\",\n /**\n * Estonian\n */\n EtMicrosoft = \"et.microsoft\",\n /**\n * Finnish\n */\n FiMicrosoft = \"fi.microsoft\",\n /**\n * Finnish\n */\n FiLucene = \"fi.lucene\",\n /**\n * French\n */\n FrMicrosoft = \"fr.microsoft\",\n /**\n * French\n */\n FrLucene = \"fr.lucene\",\n /**\n * Galician\n */\n GlLucene = \"gl.lucene\",\n /**\n * German\n */\n DeMicrosoft = \"de.microsoft\",\n /**\n * German\n */\n DeLucene = \"de.lucene\",\n /**\n * Greek\n */\n ElMicrosoft = \"el.microsoft\",\n /**\n * Greek\n */\n ElLucene = \"el.lucene\",\n /**\n * Gujarati\n */\n GuMicrosoft = \"gu.microsoft\",\n /**\n * Hebrew\n */\n HeMicrosoft = \"he.microsoft\",\n /**\n * Hindi\n */\n HiMicrosoft = \"hi.microsoft\",\n /**\n * Hindi\n */\n HiLucene = \"hi.lucene\",\n /**\n * Hungarian\n */\n HuMicrosoft = \"hu.microsoft\",\n /**\n * Hungarian\n */\n HuLucene = \"hu.lucene\",\n /**\n * Icelandic\n */\n IsMicrosoft = \"is.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdMicrosoft = \"id.microsoft\",\n /**\n * Indonesian (Bahasa)\n */\n IdLucene = \"id.lucene\",\n /**\n * Irish\n */\n GaLucene = \"ga.lucene\",\n /**\n * Italian\n */\n ItMicrosoft = \"it.microsoft\",\n /**\n * Italian\n */\n ItLucene = \"it.lucene\",\n /**\n * Japanese\n */\n JaMicrosoft = \"ja.microsoft\",\n /**\n * Japanese\n */\n JaLucene = \"ja.lucene\",\n /**\n * Kannada\n */\n KnMicrosoft = \"kn.microsoft\",\n /**\n * Korean\n */\n KoMicrosoft = \"ko.microsoft\",\n /**\n * Korean\n */\n KoLucene = \"ko.lucene\",\n /**\n * Latvian\n */\n LvMicrosoft = \"lv.microsoft\",\n /**\n * Latvian\n */\n LvLucene = \"lv.lucene\",\n /**\n * Lithuanian\n */\n LtMicrosoft = \"lt.microsoft\",\n /**\n * Malayalam\n */\n MlMicrosoft = \"ml.microsoft\",\n /**\n * Malay (Latin)\n */\n MsMicrosoft = \"ms.microsoft\",\n /**\n * Marathi\n */\n MrMicrosoft = \"mr.microsoft\",\n /**\n * Norwegian\n */\n NbMicrosoft = \"nb.microsoft\",\n /**\n * Norwegian\n */\n NoLucene = \"no.lucene\",\n /**\n * 
Persian\n */\n FaLucene = \"fa.lucene\",\n /**\n * Polish\n */\n PlMicrosoft = \"pl.microsoft\",\n /**\n * Polish\n */\n PlLucene = \"pl.lucene\",\n /**\n * Portuguese (Brazil)\n */\n PtBRMicrosoft = \"pt-BR.microsoft\",\n /**\n * Portuguese (Brazil)\n */\n PtBRLucene = \"pt-BR.lucene\",\n /**\n * Portuguese (Portugal)\n */\n PtPTMicrosoft = \"pt-PT.microsoft\",\n /**\n * Portuguese (Portugal)\n */\n PtPTLucene = \"pt-PT.lucene\",\n /**\n * Punjabi\n */\n PaMicrosoft = \"pa.microsoft\",\n /**\n * Romanian\n */\n RoMicrosoft = \"ro.microsoft\",\n /**\n * Romanian\n */\n RoLucene = \"ro.lucene\",\n /**\n * Russian\n */\n RuMicrosoft = \"ru.microsoft\",\n /**\n * Russian\n */\n RuLucene = \"ru.lucene\",\n /**\n * Serbian (Cyrillic)\n */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /**\n * Serbian (Latin)\n */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /**\n * Slovak\n */\n SkMicrosoft = \"sk.microsoft\",\n /**\n * Slovenian\n */\n SlMicrosoft = \"sl.microsoft\",\n /**\n * Spanish\n */\n EsMicrosoft = \"es.microsoft\",\n /**\n * Spanish\n */\n EsLucene = \"es.lucene\",\n /**\n * Swedish\n */\n SvMicrosoft = \"sv.microsoft\",\n /**\n * Swedish\n */\n SvLucene = \"sv.lucene\",\n /**\n * Tamil\n */\n TaMicrosoft = \"ta.microsoft\",\n /**\n * Telugu\n */\n TeMicrosoft = \"te.microsoft\",\n /**\n * Thai\n */\n ThMicrosoft = \"th.microsoft\",\n /**\n * Thai\n */\n ThLucene = \"th.lucene\",\n /**\n * Turkish\n */\n TrMicrosoft = \"tr.microsoft\",\n /**\n * Turkish\n */\n TrLucene = \"tr.lucene\",\n /**\n * Ukrainian\n */\n UkMicrosoft = \"uk.microsoft\",\n /**\n * Urdu\n */\n UrMicrosoft = \"ur.microsoft\",\n /**\n * Vietnamese\n */\n ViMicrosoft = \"vi.microsoft\",\n /**\n * See: https://lucene.apache.org/core/6_6_1/core/org/apache/lucene/analysis/standard/StandardAnalyzer.html\n */\n StandardLucene = \"standard.lucene\",\n /**\n * See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html\n */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /**\n * Treats the entire content of a field as a single token. This is useful for data like zip codes, IDs, and some product names.\n */\n Keyword = \"keyword\",\n /**\n * Flexibly separates text into terms via a regular expression pattern.\n */\n Pattern = \"pattern\",\n /**\n * Divides text at non-letters and converts them to lower case.\n */\n Simple = \"simple\",\n /**\n * Divides text at non-letters; applies the lowercase and stopword token filters.\n */\n Stop = \"stop\",\n /**\n * An analyzer that uses the whitespace tokenizer.\n */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Contains the possible cases for DataChangeDetectionPolicy.\n */\nexport type DataChangeDetectionPolicy =\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\n\n/**\n * Contains the possible cases for SearchIndexerDataIdentity.\n */\nexport type SearchIndexerDataIdentity =\n | SearchIndexerDataNoneIdentity\n | SearchIndexerDataUserAssignedIdentity;\n\n/**\n * Contains the possible cases for DataDeletionDetectionPolicy.\n */\nexport type DataDeletionDetectionPolicy = SoftDeleteColumnDeletionDetectionPolicy;\n\n/**\n * Represents a datasource definition, which can be used to configure an indexer.\n */\nexport interface SearchIndexerDataSourceConnection {\n /**\n * The name of the datasource.\n */\n name: string;\n /**\n * The description of the datasource.\n */\n description?: string;\n /**\n * The type of the datasource. 
Possible values include: 'AzureSql', 'CosmosDb', 'AzureBlob',\n * 'AzureTable', 'MySql', 'AdlsGen2'\n */\n type: SearchIndexerDataSourceType;\n /**\n * The connection string for the datasource.\n */\n connectionString?: string;\n /**\n * The data container for the datasource.\n */\n container: SearchIndexerDataContainer;\n /**\n * An explicit managed identity to use for this datasource. If not specified and the connection\n * string is a managed identity connection string, the system-assigned managed identity is used.\n * If not specified, the value remains unchanged. If \"none\" is specified, the value of this\n * property is cleared.\n */\n identity?: SearchIndexerDataIdentity;\n /**\n * The data change detection policy for the datasource.\n */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicy;\n /**\n * The data deletion detection policy for the datasource.\n */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicy;\n /**\n * The ETag of the DataSource.\n */\n etag?: string;\n /**\n * A description of an encryption key that you create in Azure Key Vault. This key is used to\n * provide an additional level of encryption-at-rest for your datasource definition when you want\n * full assurance that no one, not even Microsoft, can decrypt your data source definition in\n * Azure Cognitive Search. Once you have encrypted your data source definition, it will always\n * remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null.\n * You can change this property as needed if you want to rotate your encryption key; your\n * datasource definition will be unaffected. Encryption with customer-managed keys is not\n * available for free search services, and is only available for paid services created on or\n * after January 1, 2019.\n */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/** Contains configuration options related to vector search. */\nexport interface VectorSearch {\n /** Defines combinations of configurations to use with vector search. */\n profiles?: VectorSearchProfile[];\n /** Contains configuration options specific to the algorithm used during indexing or querying. */\n algorithms?: VectorSearchAlgorithmConfiguration[];\n /** Contains configuration options on how to vectorize text vector queries. */\n vectorizers?: VectorSearchVectorizer[];\n /** Contains configuration options specific to the compression method used during indexing or querying. */\n compressions?: VectorSearchCompression[];\n}\n\n/** Contains configuration options specific to the algorithm used during indexing and/or querying. */\nexport type VectorSearchAlgorithmConfiguration =\n | HnswAlgorithmConfiguration\n | ExhaustiveKnnAlgorithmConfiguration;\n\n/** Contains configuration options specific to the algorithm used during indexing and/or querying. */\nexport interface BaseVectorSearchAlgorithmConfiguration {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: VectorSearchAlgorithmKind;\n /** The name to associate with this particular configuration. 
*/\n name: string;\n}\n\n/**\n * Contains configuration options specific to the hnsw approximate nearest neighbors algorithm\n * used during indexing time.\n */\nexport type HnswAlgorithmConfiguration = BaseVectorSearchAlgorithmConfiguration & {\n /**\n * Polymorphic discriminator, which specifies the different types this object can be\n */\n kind: \"hnsw\";\n /**\n * Contains the parameters specific to the hnsw algorithm.\n */\n parameters?: HnswParameters;\n};\n\n/**\n * Contains the parameters specific to the hnsw algorithm.\n */\nexport interface HnswParameters {\n /**\n * The number of bi-directional links created for every new element during construction.\n * Increasing this parameter value may improve recall and reduce retrieval times for datasets\n * with high intrinsic dimensionality at the expense of increased memory consumption and longer\n * indexing time.\n */\n m?: number;\n /**\n * The size of the dynamic list containing the nearest neighbors, which is used during index\n * time. Increasing this parameter may improve index quality, at the expense of increased\n * indexing time. At a certain point, increasing this parameter leads to diminishing returns.\n */\n efConstruction?: number;\n /**\n * The size of the dynamic list containing the nearest neighbors, which is used during search\n * time. Increasing this parameter may improve search results, at the expense of slower search.\n * Increasing this parameter leads to diminishing returns.\n */\n efSearch?: number;\n /**\n * The similarity metric to use for vector comparisons.\n */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */\nexport type ExhaustiveKnnAlgorithmConfiguration = BaseVectorSearchAlgorithmConfiguration & {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"exhaustiveKnn\";\n /** Contains the parameters specific to the exhaustive KNN algorithm. */\n parameters?: ExhaustiveKnnParameters;\n};\n\n/** Contains the parameters specific to the exhaustive KNN algorithm. */\nexport interface ExhaustiveKnnParameters {\n /** The similarity metric to use for vector comparisons. */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface SearchIndexerIndexProjectionParameters {\n /** Describes unknown properties. */\n [property: string]: unknown;\n /** Defines behavior of the index projections in relation to the rest of the indexer. */\n projectionMode?: IndexProjectionMode;\n}\n\n/** Definition of additional projections to secondary search indexes. */\nexport interface SearchIndexerIndexProjection {\n /** A list of projections to be performed to secondary search indexes. */\n selectors: SearchIndexerIndexProjectionSelector[];\n /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\n parameters?: SearchIndexerIndexProjectionParameters;\n}\n\n/** Contains specific details for a vectorization method to be used during query time. 
*/\nexport interface BaseVectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: VectorSearchVectorizerKind;\n /** The name to associate with this particular vectorization method. */\n vectorizerName: string;\n}\n\n/** Contains the parameters specific to using an Azure OpenAI service for vectorization at query time. */\nexport interface AzureOpenAIVectorizer extends BaseVectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\";\n /** Contains the parameters specific to Azure OpenAI embedding vectorization. */\n parameters?: AzureOpenAIParameters;\n}\n\n/** Specifies a user-defined vectorizer for generating the vector embedding of a query string. Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */\nexport interface WebApiVectorizer extends BaseVectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"customWebApi\";\n /** Specifies the properties of the user-defined vectorizer. */\n parameters?: WebApiParameters;\n}\n\n/** Specifies the properties for connecting to a user-defined vectorizer. */\nexport interface WebApiParameters {\n /** The URI of the Web API providing the vectorizer. */\n uri?: string;\n /** The headers required to make the HTTP request. */\n httpHeaders?: { [propertyName: string]: string };\n /** The method for the HTTP request. */\n httpMethod?: string;\n /** The desired timeout for the request. Default is 30 seconds. */\n timeout?: string;\n /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed identity (either system- or user-assigned) of the search service and the access token of the function or app, using this value as the resource ID for creating the scope of the access token. */\n authResourceId?: string;\n /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n authIdentity?: SearchIndexerDataIdentity;\n}\n\n/** Contains configuration options on how to vectorize text vector queries. */\nexport type VectorSearchVectorizer = AzureOpenAIVectorizer | WebApiVectorizer;\n\n/** Contains the parameters specific to using an Azure OpenAI service for vectorization at query time. */\nexport interface AzureOpenAIParameters {\n /** The resource URI for your Azure OpenAI resource. */\n resourceUrl?: string;\n /** ID of your Azure OpenAI model deployment on the designated resource. */\n deploymentId?: string;\n /** API key for the designated Azure OpenAI resource. */\n apiKey?: string;\n /** The user-assigned managed identity used for outbound connections. */\n authIdentity?: SearchIndexerDataIdentity;\n /** The name of the embedding model that is deployed at the provided deploymentId path. */\n modelName?: AzureOpenAIModelName;\n}\n\n/** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. 
*/\nexport interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill, AzureOpenAIParameters {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\";\n /** The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. */\n dimensions?: number;\n}\n\n/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface SearchIndexerKnowledgeStoreParameters {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: unknown;\n /** Whether or not projections should synthesize a generated key name if one isn't already present. */\n synthesizeGeneratedKeyName?: boolean;\n}\n\n/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface IndexingParametersConfiguration {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: any;\n /** Represents the parsing mode for indexing from an Azure blob data source. */\n parsingMode?: BlobIndexerParsingMode;\n /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude \".png, .mp4\" to skip over those files during indexing. */\n excludedFileNameExtensions?: string;\n /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files \".docx, .pptx, .msg\" to specifically include those file types. */\n indexedFileNameExtensions?: string;\n /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */\n failOnUnsupportedContentType?: boolean;\n /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */\n failOnUnprocessableDocument?: boolean;\n /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://docs.microsoft.com/azure/search/search-limits-quotas-capacity. */\n indexStorageMetadataOnlyForOversizedDocuments?: boolean;\n /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */\n delimitedTextHeaders?: string;\n /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, \"|\"). */\n delimitedTextDelimiter?: string;\n /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */\n firstLineContainsHeaders?: boolean;\n /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */\n documentRoot?: string;\n /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when \"imageAction\" is set to a value other than \"none\". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. 
*/\n dataToExtract?: BlobIndexerDataToExtract;\n /** Determines how to process embedded images and image files in Azure blob storage. Setting the \"imageAction\" configuration to any value other than \"none\" requires that a skillset also be attached to that indexer. */\n imageAction?: BlobIndexerImageAction;\n /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */\n allowSkillsetToReadFileData?: boolean;\n /** Determines the algorithm for text extraction from PDF files in Azure blob storage. */\n pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;\n /** Specifies the environment in which the indexer should execute. */\n executionEnvironment?: IndexerExecutionEnvironment;\n /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format \"hh:mm:ss\". */\n queryTimeout?: string;\n}\n\n/** Represents parameters for indexer execution. */\nexport interface IndexingParameters {\n /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */\n batchSize?: number;\n /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */\n maxFailedItems?: number;\n /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */\n maxFailedItemsPerBatch?: number;\n /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\n configuration?: IndexingParametersConfiguration;\n}\n\n/** A skill that looks for text from a custom, user-defined list of words and phrases. */\nexport interface CustomEntityLookupSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.CustomEntityLookupSkill\";\n /** A value indicating which language code to use. Default is en. */\n defaultLanguageCode?: CustomEntityLookupSkillLanguage;\n /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */\n entitiesDefinitionUri?: string;\n /** The inline CustomEntity definition. */\n inlineEntitiesDefinition?: CustomEntity[];\n /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */\n globalDefaultCaseSensitive?: boolean;\n /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */\n globalDefaultAccentSensitive?: boolean;\n /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. 
*/\n globalDefaultFuzzyEditDistance?: number;\n}\n\n/**\n * Text analytics entity recognition.\n *\n * @deprecated This skill has been deprecated.\n */\nexport interface EntityRecognitionSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.EntityRecognitionSkill\";\n /** A list of entity categories that should be extracted. */\n categories?: EntityCategory[];\n /** A value indicating which language code to use. Default is en. */\n defaultLanguageCode?: EntityRecognitionSkillLanguage;\n /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */\n includeTypelessEntities?: boolean;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n}\n\n/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */\nexport interface ImageAnalysisSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Vision.ImageAnalysisSkill\";\n /** A value indicating which language code to use. Default is en. */\n defaultLanguageCode?: ImageAnalysisSkillLanguage;\n /** A list of visual features. */\n visualFeatures?: VisualFeature[];\n /** A string indicating which domain-specific details to return. */\n details?: ImageDetail[];\n}\n\n/** A skill that uses text analytics for key phrase extraction. */\nexport interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\";\n /** A value indicating which language code to use. Default is en. */\n defaultLanguageCode?: KeyPhraseExtractionSkillLanguage;\n /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */\n maxKeyPhraseCount?: number;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** A skill that extracts text from image files. */\nexport interface OcrSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Vision.OcrSkill\";\n /** A value indicating which language code to use. Default is en. */\n defaultLanguageCode?: OcrSkillLanguage;\n /** A value indicating whether to turn on orientation detection. Default is false. */\n shouldDetectOrientation?: boolean;\n}\n\n/** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */\nexport interface PIIDetectionSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.PIIDetectionSkill\";\n /** A value indicating which language code to use. Default is en. 
*/\n defaultLanguageCode?: string;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n /** A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. */\n maskingMode?: PIIDetectionSkillMaskingMode;\n /** The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. */\n maskingCharacter?: string;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n /** A list of PII entity categories that should be extracted and masked. */\n categories?: string[];\n /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */\n domain?: string;\n}\n\n/**\n * Text analytics positive-negative sentiment analysis, scored as a floating-point value in a range of zero to 1.\n *\n * @deprecated This skill has been deprecated.\n */\nexport interface SentimentSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.SentimentSkill\";\n /** A value indicating which language code to use. Default is en. */\n defaultLanguageCode?: SentimentSkillLanguage;\n}\n\n/** A skill to split a string into chunks of text. */\nexport interface SplitSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.SplitSkill\";\n /** A value indicating which language code to use. Default is en. */\n defaultLanguageCode?: SplitSkillLanguage;\n /** A value indicating which split mode to perform. */\n textSplitMode?: TextSplitMode;\n /** The desired maximum page length. Default is 10000. */\n maxPageLength?: number;\n}\n\n/** A skill to translate text from one language to another. */\nexport interface TextTranslationSkill extends BaseSearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.TranslationSkill\";\n /** The language code to translate documents into for documents that don't specify the to language explicitly. */\n defaultToLanguageCode: TextTranslationSkillLanguage;\n /** The language code to translate documents from for documents that don't specify the from language explicitly. */\n defaultFromLanguageCode?: TextTranslationSkillLanguage;\n /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter is provided, and automatic language detection is unsuccessful. Default is en. */\n suggestedFrom?: TextTranslationSkillLanguage;\n}\n\n
/** Contains configuration options specific to the compression method used during indexing or querying. */\nexport type VectorSearchCompression = BinaryQuantizationCompression | ScalarQuantizationCompression;\n\nexport type AnalyzerNames = `${KnownLexicalAnalyzerName}`;\nexport type BlobIndexerDataToExtract = `${KnownBlobIndexerDataToExtract}`;\nexport type BlobIndexerImageAction = `${KnownBlobIndexerImageAction}`;\nexport type BlobIndexerParsingMode = `${KnownBlobIndexerParsingMode}`;\nexport type BlobIndexerPDFTextRotationAlgorithm = `${KnownBlobIndexerPDFTextRotationAlgorithm}`;\nexport type CharFilterNames = `${KnownCharFilterName}`;\nexport type CustomEntityLookupSkillLanguage = `${KnownCustomEntityLookupSkillLanguage}`;\nexport type EntityCategory = `${KnownEntityCategory}`;\nexport type EntityRecognitionSkillLanguage = `${KnownEntityRecognitionSkillLanguage}`;\nexport type ImageAnalysisSkillLanguage = `${KnownImageAnalysisSkillLanguage}`;\nexport type ImageDetail = `${KnownImageDetail}`;\nexport type IndexerExecutionEnvironment = `${KnownIndexerExecutionEnvironment}`;\nexport type KeyPhraseExtractionSkillLanguage = `${KnownKeyPhraseExtractionSkillLanguage}`;\nexport type OcrSkillLanguage = `${KnownOcrSkillLanguage}`;\nexport type PIIDetectionSkillMaskingMode = `${KnownPIIDetectionSkillMaskingMode}`;\nexport type RegexFlags = `${KnownRegexFlags}`;\n/**\n * Defines values for SearchFieldDataType.\n *\n * ### Known values supported by the service:\n *\n * **Edm.String**: Indicates that a field contains a string.\n *\n * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer.\n *\n * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer.\n *\n * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number.\n *\n * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false).\n *\n * **Edm.DateTimeOffset**: Indicates that a field contains a date/time value, including timezone\n * information.\n *\n * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and\n * latitude.\n *\n * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn\n * have sub-fields of other types.\n *\n * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is\n * only valid when used as part of a collection type, i.e. Collection(Edm.Single).\n *\n * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is\n * only valid when used as part of a collection type, i.e. Collection(Edm.Half).\n *\n * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when\n * used as part of a collection type, i.e. Collection(Edm.Int16).\n *\n * **Edm.SByte**: Indicates that a field contains an 8-bit signed integer. This is only valid when\n * used as part of a collection type, i.e. Collection(Edm.SByte).\n *\n * **Edm.Byte**: Indicates that a field contains an 8-bit unsigned integer. This is only valid when\n * used as part of a collection type, i.e. 
Collection(Edm.Byte).\n */\nexport type SearchFieldDataType = Exclude<\n `${KnownSearchFieldDataType}` | `Collection(${KnownSearchFieldDataType})`,\n \"Edm.ComplexType\" | \"Edm.Byte\" | \"Edm.Half\" | \"Edm.Int16\" | \"Edm.SByte\" | \"Edm.Single\"\n>;\nexport type SearchIndexerDataSourceType = `${KnownSearchIndexerDataSourceType}`;\nexport type SentimentSkillLanguage = `${KnownSentimentSkillLanguage}`;\nexport type SplitSkillLanguage = `${KnownSplitSkillLanguage}`;\nexport type TextSplitMode = `${KnownTextSplitMode}`;\nexport type TextTranslationSkillLanguage = `${KnownTextTranslationSkillLanguage}`;\nexport type TokenFilterNames = `${KnownTokenFilterName}`;\nexport type TokenizerNames = `${KnownLexicalTokenizerName}`;\nexport type VectorSearchAlgorithmKind = `${KnownVectorSearchAlgorithmKind}`;\nexport type VectorSearchAlgorithmMetric = `${KnownVectorSearchAlgorithmMetric}`;\nexport type VisualFeature = `${KnownVisualFeature}`;\n\n// END manually modified generated interfaces\n"]}
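Taken together, the vector search models above compose as follows. This is a minimal sketch of an index-creation call that pairs the hnsw algorithm with an Azure OpenAI vectorizer through a profile; the service endpoint, admin key, index and field names, and Azure OpenAI resource values are placeholders, and the VectorSearchProfile property names (algorithmConfigurationName, vectorizerName) are assumed from this version's public typings rather than confirmed by the diff above:

import {
  AzureKeyCredential,
  SearchIndexClient,
  VectorSearch,
} from "@azure/search-documents";

// Placeholder endpoint and admin key.
const indexClient = new SearchIndexClient(
  "https://<service>.search.windows.net",
  new AzureKeyCredential("<admin-key>"),
);

// Wire an hnsw algorithm and an Azure OpenAI vectorizer together via a profile.
const vectorSearch: VectorSearch = {
  algorithms: [
    {
      name: "hnsw-default",
      kind: "hnsw",
      parameters: { m: 4, efConstruction: 400, efSearch: 500, metric: "cosine" },
    },
  ],
  vectorizers: [
    {
      kind: "azureOpenAI",
      vectorizerName: "aoai-vectorizer",
      parameters: {
        resourceUrl: "https://<aoai-resource>.openai.azure.com", // placeholder
        deploymentId: "text-embedding-3-small",
        modelName: "text-embedding-3-small",
      },
    },
  ],
  profiles: [
    {
      name: "vector-profile",
      algorithmConfigurationName: "hnsw-default",
      vectorizerName: "aoai-vectorizer",
    },
  ],
};

async function main(): Promise<void> {
  await indexClient.createIndex({
    name: "docs-index", // placeholder index name
    fields: [
      { name: "id", type: "Edm.String", key: true },
      {
        name: "embedding",
        // Collection(Edm.Single) remains valid; only scalar Edm.Single is
        // excluded by the SearchFieldDataType definition above.
        type: "Collection(Edm.Single)",
        searchable: true,
        vectorSearchDimensions: 1536,
        vectorSearchProfileName: "vector-profile",
      },
    ],
    vectorSearch,
  });
}

main().catch(console.error);

The kind literals ("hnsw", "azureOpenAI") are the discriminators that select the concrete member of the VectorSearchAlgorithmConfiguration and VectorSearchVectorizer unions.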
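The data source models compose the same way. A sketch assuming a hypothetical Azure SQL source; the connection string, table, and column names are placeholders, and the odatatype strings follow the policy discriminators defined in this package's models:

import {
  AzureKeyCredential,
  SearchIndexerClient,
  SearchIndexerDataSourceConnection,
} from "@azure/search-documents";

const indexerClient = new SearchIndexerClient(
  "https://<service>.search.windows.net",
  new AzureKeyCredential("<admin-key>"),
);

const dataSource: SearchIndexerDataSourceConnection = {
  name: "hotels-sql", // placeholder data source name
  type: "azuresql",
  connectionString: "<sql-connection-string>",
  container: { name: "dbo.Hotels" }, // placeholder table
  // HighWaterMarkChangeDetectionPolicy: re-process only rows whose
  // high-water-mark column has advanced since the last indexer run.
  dataChangeDetectionPolicy: {
    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
    highWaterMarkColumnName: "LastUpdated",
  },
  // SoftDeleteColumnDeletionDetectionPolicy: remove documents whose
  // soft-delete column matches the marker value.
  dataDeletionDetectionPolicy: {
    odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
    softDeleteColumnName: "IsDeleted",
    softDeleteMarkerValue: "true",
  },
};

async function main(): Promise<void> {
  await indexerClient.createDataSourceConnection(dataSource);
}

main().catch(console.error);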
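Finally, IndexingParameters and the skill models plug into an indexer like this. A sketch reusing the placeholder names from the two examples above; the thresholds and configuration values are illustrative only, and blob-specific keys (parsingMode, dataToExtract, imageAction, and so on) would go in the same configuration bag when the data source is Azure Blob Storage rather than SQL:

import {
  AzureKeyCredential,
  IndexingParameters,
  SearchIndexerClient,
  SplitSkill,
} from "@azure/search-documents";

const indexerClient = new SearchIndexerClient(
  "https://<service>.search.windows.net",
  new AzureKeyCredential("<admin-key>"),
);

// Batch sizing and failure tolerance (see IndexingParameters above).
const parameters: IndexingParameters = {
  batchSize: 100,
  maxFailedItems: 10, // tolerate up to 10 failed documents overall; -1 means no limit
  maxFailedItemsPerBatch: 5,
  configuration: {
    // SQL-specific: raise the query timeout beyond the 5-minute default.
    queryTimeout: "00:10:00",
  },
};

// A SplitSkill that chunks extracted text into pages of at most 2000 characters.
const splitSkill: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  textSplitMode: "pages",
  maxPageLength: 2000,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};

async function main(): Promise<void> {
  await indexerClient.createSkillset({
    name: "split-skillset", // placeholder skillset name
    skills: [splitSkill],
  });
  await indexerClient.createIndexer({
    name: "docs-indexer",
    dataSourceName: "hotels-sql", // from the data source sketch above
    targetIndexName: "docs-index", // from the index sketch above
    skillsetName: "split-skillset",
    parameters,
  });
}

main().catch(console.error);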
|