@azure/search-documents 12.2.0-beta.2 → 12.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +3 -3
- package/README.md +208 -232
- package/dist/browser/base64-browser.d.mts.map +1 -0
- package/dist/browser/base64-browser.mjs.map +1 -0
- package/dist/browser/base64.d.ts +11 -0
- package/{dist-esm/src/base64.browser.js → dist/browser/base64.js} +1 -1
- package/dist/browser/errorModels.d.ts +50 -0
- package/dist/browser/errorModels.d.ts.map +1 -0
- package/dist/browser/errorModels.js.map +1 -0
- package/dist/browser/generated/data/index.d.ts +4 -0
- package/dist/browser/generated/data/index.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/data/index.js +3 -3
- package/dist/browser/generated/data/index.js.map +1 -0
- package/dist/browser/generated/data/models/index.d.ts +783 -0
- package/dist/browser/generated/data/models/index.d.ts.map +1 -0
- package/dist/browser/generated/data/models/index.js +80 -0
- package/dist/browser/generated/data/models/index.js.map +1 -0
- package/dist/browser/generated/data/models/mappers.d.ts +34 -0
- package/dist/browser/generated/data/models/mappers.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/data/models/mappers.js +18 -365
- package/dist/browser/generated/data/models/mappers.js.map +1 -0
- package/dist/browser/generated/data/models/parameters.d.ts +58 -0
- package/dist/browser/generated/data/models/parameters.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/data/models/parameters.js +1 -43
- package/dist/browser/generated/data/models/parameters.js.map +1 -0
- package/dist/browser/generated/data/operations/documents.d.ts +70 -0
- package/dist/browser/generated/data/operations/documents.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/data/operations/documents.js +9 -6
- package/dist/browser/generated/data/operations/documents.js.map +1 -0
- package/dist/browser/generated/data/operations/index.d.ts +2 -0
- package/dist/browser/generated/data/operations/index.d.ts.map +1 -0
- package/{dist-esm/src/generated/data/operationsInterfaces → dist/browser/generated/data/operations}/index.js +1 -1
- package/dist/browser/generated/data/operations/index.js.map +1 -0
- package/dist/browser/generated/data/operationsInterfaces/documents.d.ts +62 -0
- package/dist/browser/generated/data/operationsInterfaces/documents.d.ts.map +1 -0
- package/dist/browser/generated/data/operationsInterfaces/documents.js.map +1 -0
- package/dist/browser/generated/data/operationsInterfaces/index.d.ts +2 -0
- package/dist/browser/generated/data/operationsInterfaces/index.d.ts.map +1 -0
- package/{dist-esm/src/generated/data/operations → dist/browser/generated/data/operationsInterfaces}/index.js +1 -1
- package/dist/browser/generated/data/operationsInterfaces/index.js.map +1 -0
- package/dist/browser/generated/data/searchClient.d.ts +21 -0
- package/dist/browser/generated/data/searchClient.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/data/searchClient.js +15 -5
- package/dist/browser/generated/data/searchClient.js.map +1 -0
- package/dist/browser/generated/service/index.d.ts +4 -0
- package/dist/browser/generated/service/index.d.ts.map +1 -0
- package/dist/browser/generated/service/index.js +11 -0
- package/dist/browser/generated/service/index.js.map +1 -0
- package/dist/browser/generated/service/models/index.d.ts +4428 -0
- package/dist/browser/generated/service/models/index.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/models/index.js +44 -94
- package/dist/browser/generated/service/models/index.js.map +1 -0
- package/dist/browser/generated/service/models/mappers.d.ts +272 -0
- package/dist/browser/generated/service/models/mappers.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/models/mappers.js +794 -906
- package/dist/browser/generated/service/models/mappers.js.map +1 -0
- package/dist/browser/generated/service/models/parameters.d.ts +22 -0
- package/dist/browser/generated/service/models/parameters.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/models/parameters.js +1 -51
- package/dist/browser/generated/service/models/parameters.js.map +1 -0
- package/dist/browser/generated/service/operations/aliases.d.ts +2 -0
- package/dist/browser/generated/service/operations/aliases.d.ts.map +1 -0
- package/dist/browser/generated/service/operations/aliases.js +201 -0
- package/dist/browser/generated/service/operations/aliases.js.map +1 -0
- package/dist/browser/generated/service/operations/dataSources.d.ts +43 -0
- package/dist/browser/generated/service/operations/dataSources.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/operations/dataSources.js +4 -6
- package/dist/browser/generated/service/operations/dataSources.js.map +1 -0
- package/dist/browser/generated/service/operations/index.d.ts +6 -0
- package/dist/browser/generated/service/operations/index.d.ts.map +1 -0
- package/dist/browser/generated/service/operations/index.js +13 -0
- package/dist/browser/generated/service/operations/index.js.map +1 -0
- package/dist/browser/generated/service/operations/indexers.d.ts +61 -0
- package/dist/browser/generated/service/operations/indexers.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/operations/indexers.js +4 -31
- package/dist/browser/generated/service/operations/indexers.js.map +1 -0
- package/dist/browser/generated/service/operations/indexes.d.ts +58 -0
- package/dist/browser/generated/service/operations/indexes.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/operations/indexes.js +3 -2
- package/dist/browser/generated/service/operations/indexes.js.map +1 -0
- package/dist/browser/generated/service/operations/skillsets.d.ts +43 -0
- package/dist/browser/generated/service/operations/skillsets.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/operations/skillsets.js +4 -32
- package/dist/browser/generated/service/operations/skillsets.js.map +1 -0
- package/dist/browser/generated/service/operations/synonymMaps.d.ts +43 -0
- package/dist/browser/generated/service/operations/synonymMaps.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/operations/synonymMaps.js +3 -2
- package/dist/browser/generated/service/operations/synonymMaps.js.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/aliases.d.ts +2 -0
- package/dist/browser/generated/service/operationsInterfaces/aliases.d.ts.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/aliases.js +68 -0
- package/dist/browser/generated/service/operationsInterfaces/aliases.js.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/dataSources.d.ts +35 -0
- package/dist/browser/generated/service/operationsInterfaces/dataSources.d.ts.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/dataSources.js.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/index.d.ts +6 -0
- package/dist/browser/generated/service/operationsInterfaces/index.d.ts.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/index.js +13 -0
- package/dist/browser/generated/service/operationsInterfaces/index.js.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/indexers.d.ts +53 -0
- package/dist/browser/generated/service/operationsInterfaces/indexers.d.ts.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/indexers.js.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/indexes.d.ts +50 -0
- package/dist/browser/generated/service/operationsInterfaces/indexes.d.ts.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/indexes.js.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/skillsets.d.ts +35 -0
- package/dist/browser/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/skillsets.js.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/synonymMaps.d.ts +35 -0
- package/dist/browser/generated/service/operationsInterfaces/synonymMaps.d.ts.map +1 -0
- package/dist/browser/generated/service/operationsInterfaces/synonymMaps.js.map +1 -0
- package/dist/browser/generated/service/searchServiceClient.d.ts +28 -0
- package/dist/browser/generated/service/searchServiceClient.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/generated/service/searchServiceClient.js +18 -8
- package/dist/browser/generated/service/searchServiceClient.js.map +1 -0
- package/dist/browser/geographyPoint.d.ts +27 -0
- package/dist/browser/geographyPoint.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/geographyPoint.js +8 -0
- package/dist/browser/geographyPoint.js.map +1 -0
- package/dist/browser/index.d.ts +15 -0
- package/dist/browser/index.d.ts.map +1 -0
- package/dist/browser/index.js +16 -0
- package/dist/browser/index.js.map +1 -0
- package/dist/browser/indexDocumentsBatch.d.ts +41 -0
- package/dist/browser/indexDocumentsBatch.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/indexDocumentsBatch.js +20 -4
- package/dist/browser/indexDocumentsBatch.js.map +1 -0
- package/dist/browser/indexModels.d.ts +722 -0
- package/dist/browser/indexModels.d.ts.map +1 -0
- package/dist/browser/indexModels.js.map +1 -0
- package/dist/browser/logger.d.ts +5 -0
- package/dist/browser/logger.d.ts.map +1 -0
- package/dist/browser/odata.d.ts +14 -0
- package/dist/browser/odata.d.ts.map +1 -0
- package/dist/browser/odataMetadataPolicy.d.ts +9 -0
- package/dist/browser/odataMetadataPolicy.d.ts.map +1 -0
- package/dist/browser/odataMetadataPolicy.js.map +1 -0
- package/dist/browser/package.json +3 -0
- package/dist/browser/searchApiKeyCredentialPolicy.d.ts +8 -0
- package/dist/browser/searchApiKeyCredentialPolicy.d.ts.map +1 -0
- package/dist/browser/searchApiKeyCredentialPolicy.js.map +1 -0
- package/dist/browser/searchAudience.d.ts +18 -0
- package/dist/browser/searchAudience.d.ts.map +1 -0
- package/dist/browser/searchClient.d.ts +266 -0
- package/dist/browser/searchClient.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/searchClient.js +138 -138
- package/dist/browser/searchClient.js.map +1 -0
- package/dist/browser/searchIndexClient.d.ts +177 -0
- package/dist/browser/searchIndexClient.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/searchIndexClient.js +118 -268
- package/dist/browser/searchIndexClient.js.map +1 -0
- package/dist/browser/searchIndexerClient.d.ts +187 -0
- package/dist/browser/searchIndexerClient.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/searchIndexerClient.js +79 -93
- package/dist/browser/searchIndexerClient.js.map +1 -0
- package/dist/browser/searchIndexingBufferedSender.d.ts +193 -0
- package/dist/browser/searchIndexingBufferedSender.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/searchIndexingBufferedSender.js +54 -15
- package/dist/browser/searchIndexingBufferedSender.js.map +1 -0
- package/dist/browser/serialization.d.ts +3 -0
- package/dist/browser/serialization.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/serialization.js +2 -2
- package/dist/browser/serialization.js.map +1 -0
- package/dist/browser/serviceModels.d.ts +2181 -0
- package/dist/browser/serviceModels.d.ts.map +1 -0
- package/dist/browser/serviceModels.js.map +1 -0
- package/dist/browser/serviceUtils.d.ts +35 -0
- package/dist/browser/serviceUtils.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/serviceUtils.js +139 -139
- package/dist/browser/serviceUtils.js.map +1 -0
- package/dist/browser/synonymMapHelper-browser.d.mts.map +1 -0
- package/dist/browser/synonymMapHelper-browser.mjs.map +1 -0
- package/dist/browser/synonymMapHelper.d.ts +11 -0
- package/{dist-esm/src/synonymMapHelper.browser.js → dist/browser/synonymMapHelper.js} +1 -1
- package/dist/browser/tracing.d.ts +7 -0
- package/dist/browser/tracing.d.ts.map +1 -0
- package/dist/browser/walk.d.ts +2 -0
- package/dist/browser/walk.d.ts.map +1 -0
- package/{dist-esm/src → dist/browser}/walk.js +9 -12
- package/dist/browser/walk.js.map +1 -0
- package/dist/commonjs/base64.d.ts +11 -0
- package/dist/commonjs/base64.d.ts.map +1 -0
- package/dist/commonjs/base64.js +21 -0
- package/dist/commonjs/base64.js.map +1 -0
- package/dist/commonjs/errorModels.d.ts +50 -0
- package/dist/commonjs/errorModels.d.ts.map +1 -0
- package/dist/commonjs/errorModels.js +5 -0
- package/dist/commonjs/errorModels.js.map +1 -0
- package/dist/commonjs/generated/data/index.d.ts +4 -0
- package/dist/commonjs/generated/data/index.d.ts.map +1 -0
- package/dist/commonjs/generated/data/index.js +16 -0
- package/dist/commonjs/generated/data/index.js.map +1 -0
- package/dist/commonjs/generated/data/models/index.d.ts +783 -0
- package/dist/commonjs/generated/data/models/index.d.ts.map +1 -0
- package/dist/commonjs/generated/data/models/index.js +83 -0
- package/dist/commonjs/generated/data/models/index.js.map +1 -0
- package/dist/commonjs/generated/data/models/mappers.d.ts +34 -0
- package/dist/commonjs/generated/data/models/mappers.d.ts.map +1 -0
- package/dist/commonjs/generated/data/models/mappers.js +1091 -0
- package/dist/commonjs/generated/data/models/mappers.js.map +1 -0
- package/dist/commonjs/generated/data/models/parameters.d.ts +58 -0
- package/dist/commonjs/generated/data/models/parameters.d.ts.map +1 -0
- package/dist/commonjs/generated/data/models/parameters.js +581 -0
- package/dist/commonjs/generated/data/models/parameters.js.map +1 -0
- package/dist/commonjs/generated/data/operations/documents.d.ts +70 -0
- package/dist/commonjs/generated/data/operations/documents.d.ts.map +1 -0
- package/dist/commonjs/generated/data/operations/documents.js +320 -0
- package/dist/commonjs/generated/data/operations/documents.js.map +1 -0
- package/dist/commonjs/generated/data/operations/index.d.ts +2 -0
- package/dist/commonjs/generated/data/operations/index.d.ts.map +1 -0
- package/dist/commonjs/generated/data/operations/index.js +12 -0
- package/dist/commonjs/generated/data/operations/index.js.map +1 -0
- package/dist/commonjs/generated/data/operationsInterfaces/documents.d.ts +62 -0
- package/dist/commonjs/generated/data/operationsInterfaces/documents.d.ts.map +1 -0
- package/dist/commonjs/generated/data/operationsInterfaces/documents.js +10 -0
- package/dist/commonjs/generated/data/operationsInterfaces/documents.js.map +1 -0
- package/dist/commonjs/generated/data/operationsInterfaces/index.d.ts +2 -0
- package/dist/commonjs/generated/data/operationsInterfaces/index.d.ts.map +1 -0
- package/dist/commonjs/generated/data/operationsInterfaces/index.js +12 -0
- package/dist/commonjs/generated/data/operationsInterfaces/index.js.map +1 -0
- package/dist/commonjs/generated/data/searchClient.d.ts +21 -0
- package/dist/commonjs/generated/data/searchClient.d.ts.map +1 -0
- package/dist/commonjs/generated/data/searchClient.js +93 -0
- package/dist/commonjs/generated/data/searchClient.js.map +1 -0
- package/dist/commonjs/generated/service/index.d.ts +4 -0
- package/dist/commonjs/generated/service/index.d.ts.map +1 -0
- package/dist/commonjs/generated/service/index.js +16 -0
- package/dist/commonjs/generated/service/index.js.map +1 -0
- package/dist/commonjs/generated/service/models/index.d.ts +4428 -0
- package/dist/commonjs/generated/service/models/index.d.ts.map +1 -0
- package/dist/commonjs/generated/service/models/index.js +1459 -0
- package/dist/commonjs/generated/service/models/index.js.map +1 -0
- package/dist/commonjs/generated/service/models/mappers.d.ts +272 -0
- package/dist/commonjs/generated/service/models/mappers.d.ts.map +1 -0
- package/dist/commonjs/generated/service/models/mappers.js +6077 -0
- package/dist/commonjs/generated/service/models/mappers.js.map +1 -0
- package/dist/commonjs/generated/service/models/parameters.d.ts +22 -0
- package/dist/commonjs/generated/service/models/parameters.d.ts.map +1 -0
- package/dist/commonjs/generated/service/models/parameters.js +176 -0
- package/dist/commonjs/generated/service/models/parameters.js.map +1 -0
- package/dist/commonjs/generated/service/operations/aliases.d.ts +2 -0
- package/dist/commonjs/generated/service/operations/aliases.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operations/aliases.js +202 -0
- package/dist/commonjs/generated/service/operations/aliases.js.map +1 -0
- package/dist/commonjs/generated/service/operations/dataSources.d.ts +43 -0
- package/dist/commonjs/generated/service/operations/dataSources.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operations/dataSources.js +165 -0
- package/dist/commonjs/generated/service/operations/dataSources.js.map +1 -0
- package/dist/commonjs/generated/service/operations/index.d.ts +6 -0
- package/dist/commonjs/generated/service/operations/index.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operations/index.js +16 -0
- package/dist/commonjs/generated/service/operations/index.js.map +1 -0
- package/dist/commonjs/generated/service/operations/indexers.d.ts +61 -0
- package/dist/commonjs/generated/service/operations/indexers.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operations/indexers.js +233 -0
- package/dist/commonjs/generated/service/operations/indexers.js.map +1 -0
- package/dist/commonjs/generated/service/operations/indexes.d.ts +58 -0
- package/dist/commonjs/generated/service/operations/indexes.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operations/indexes.js +218 -0
- package/dist/commonjs/generated/service/operations/indexes.js.map +1 -0
- package/dist/commonjs/generated/service/operations/skillsets.d.ts +43 -0
- package/dist/commonjs/generated/service/operations/skillsets.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operations/skillsets.js +165 -0
- package/dist/commonjs/generated/service/operations/skillsets.js.map +1 -0
- package/dist/commonjs/generated/service/operations/synonymMaps.d.ts +43 -0
- package/dist/commonjs/generated/service/operations/synonymMaps.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operations/synonymMaps.js +165 -0
- package/dist/commonjs/generated/service/operations/synonymMaps.js.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.d.ts +2 -0
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.js +69 -0
- package/dist/commonjs/generated/service/operationsInterfaces/aliases.js.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/dataSources.d.ts +35 -0
- package/dist/commonjs/generated/service/operationsInterfaces/dataSources.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/dataSources.js +10 -0
- package/dist/commonjs/generated/service/operationsInterfaces/dataSources.js.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/index.d.ts +6 -0
- package/dist/commonjs/generated/service/operationsInterfaces/index.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/index.js +16 -0
- package/dist/commonjs/generated/service/operationsInterfaces/index.js.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexers.d.ts +53 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexers.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexers.js +10 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexers.js.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexes.d.ts +50 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexes.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexes.js +10 -0
- package/dist/commonjs/generated/service/operationsInterfaces/indexes.js.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/skillsets.d.ts +35 -0
- package/dist/commonjs/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/skillsets.js +10 -0
- package/dist/commonjs/generated/service/operationsInterfaces/skillsets.js.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/synonymMaps.d.ts +35 -0
- package/dist/commonjs/generated/service/operationsInterfaces/synonymMaps.d.ts.map +1 -0
- package/dist/commonjs/generated/service/operationsInterfaces/synonymMaps.js +10 -0
- package/dist/commonjs/generated/service/operationsInterfaces/synonymMaps.js.map +1 -0
- package/dist/commonjs/generated/service/searchServiceClient.d.ts +28 -0
- package/dist/commonjs/generated/service/searchServiceClient.d.ts.map +1 -0
- package/dist/commonjs/generated/service/searchServiceClient.js +121 -0
- package/dist/commonjs/generated/service/searchServiceClient.js.map +1 -0
- package/dist/commonjs/geographyPoint.d.ts +27 -0
- package/dist/commonjs/geographyPoint.d.ts.map +1 -0
- package/dist/commonjs/geographyPoint.js +39 -0
- package/dist/commonjs/geographyPoint.js.map +1 -0
- package/dist/commonjs/index.d.ts +15 -0
- package/dist/commonjs/index.d.ts.map +1 -0
- package/dist/commonjs/index.js +81 -0
- package/dist/commonjs/index.js.map +1 -0
- package/dist/commonjs/indexDocumentsBatch.d.ts +41 -0
- package/dist/commonjs/indexDocumentsBatch.d.ts.map +1 -0
- package/dist/commonjs/indexDocumentsBatch.js +83 -0
- package/dist/commonjs/indexDocumentsBatch.js.map +1 -0
- package/dist/commonjs/indexModels.d.ts +722 -0
- package/dist/commonjs/indexModels.d.ts.map +1 -0
- package/dist/commonjs/indexModels.js +5 -0
- package/dist/commonjs/indexModels.js.map +1 -0
- package/dist/commonjs/logger.d.ts +5 -0
- package/dist/commonjs/logger.d.ts.map +1 -0
- package/dist/commonjs/logger.js +11 -0
- package/dist/commonjs/logger.js.map +1 -0
- package/dist/commonjs/odata.d.ts +14 -0
- package/dist/commonjs/odata.d.ts.map +1 -0
- package/dist/commonjs/odata.js +50 -0
- package/dist/commonjs/odata.js.map +1 -0
- package/dist/commonjs/odataMetadataPolicy.d.ts +9 -0
- package/dist/commonjs/odataMetadataPolicy.d.ts.map +1 -0
- package/dist/commonjs/odataMetadataPolicy.js +21 -0
- package/dist/commonjs/odataMetadataPolicy.js.map +1 -0
- package/dist/commonjs/package.json +3 -0
- package/dist/commonjs/searchApiKeyCredentialPolicy.d.ts +8 -0
- package/dist/commonjs/searchApiKeyCredentialPolicy.d.ts.map +1 -0
- package/dist/commonjs/searchApiKeyCredentialPolicy.js +23 -0
- package/dist/commonjs/searchApiKeyCredentialPolicy.js.map +1 -0
- package/dist/commonjs/searchAudience.d.ts +18 -0
- package/dist/commonjs/searchAudience.d.ts.map +1 -0
- package/dist/commonjs/searchAudience.js +24 -0
- package/dist/commonjs/searchAudience.js.map +1 -0
- package/dist/commonjs/searchClient.d.ts +266 -0
- package/dist/commonjs/searchClient.d.ts.map +1 -0
- package/dist/commonjs/searchClient.js +669 -0
- package/dist/commonjs/searchClient.js.map +1 -0
- package/dist/commonjs/searchIndexClient.d.ts +177 -0
- package/dist/commonjs/searchIndexClient.d.ts.map +1 -0
- package/dist/commonjs/searchIndexClient.js +514 -0
- package/dist/commonjs/searchIndexClient.js.map +1 -0
- package/dist/commonjs/searchIndexerClient.d.ts +187 -0
- package/dist/commonjs/searchIndexerClient.d.ts.map +1 -0
- package/dist/commonjs/searchIndexerClient.js +591 -0
- package/dist/commonjs/searchIndexerClient.js.map +1 -0
- package/dist/commonjs/searchIndexingBufferedSender.d.ts +193 -0
- package/dist/commonjs/searchIndexingBufferedSender.d.ts.map +1 -0
- package/dist/commonjs/searchIndexingBufferedSender.js +327 -0
- package/dist/commonjs/searchIndexingBufferedSender.js.map +1 -0
- package/dist/commonjs/serialization.d.ts +3 -0
- package/dist/commonjs/serialization.d.ts.map +1 -0
- package/dist/commonjs/serialization.js +149 -0
- package/dist/commonjs/serialization.js.map +1 -0
- package/dist/commonjs/serviceModels.d.ts +2181 -0
- package/dist/commonjs/serviceModels.d.ts.map +1 -0
- package/dist/commonjs/serviceModels.js +658 -0
- package/dist/commonjs/serviceModels.js.map +1 -0
- package/dist/commonjs/serviceUtils.d.ts +35 -0
- package/dist/commonjs/serviceUtils.d.ts.map +1 -0
- package/dist/commonjs/serviceUtils.js +548 -0
- package/dist/commonjs/serviceUtils.js.map +1 -0
- package/dist/commonjs/synonymMapHelper.d.ts +10 -0
- package/dist/commonjs/synonymMapHelper.d.ts.map +1 -0
- package/dist/commonjs/synonymMapHelper.js +28 -0
- package/dist/commonjs/synonymMapHelper.js.map +1 -0
- package/dist/commonjs/tracing.d.ts +7 -0
- package/dist/commonjs/tracing.d.ts.map +1 -0
- package/dist/commonjs/tracing.js +16 -0
- package/dist/commonjs/tracing.js.map +1 -0
- package/dist/commonjs/tsdoc-metadata.json +11 -0
- package/dist/commonjs/walk.d.ts +2 -0
- package/dist/commonjs/walk.d.ts.map +1 -0
- package/dist/commonjs/walk.js +79 -0
- package/dist/commonjs/walk.js.map +1 -0
- package/dist/esm/base64.d.ts +11 -0
- package/dist/esm/base64.d.ts.map +1 -0
- package/dist/esm/errorModels.d.ts +50 -0
- package/dist/esm/errorModels.d.ts.map +1 -0
- package/dist/esm/errorModels.js +4 -0
- package/dist/esm/errorModels.js.map +1 -0
- package/dist/esm/generated/data/index.d.ts +4 -0
- package/dist/esm/generated/data/index.d.ts.map +1 -0
- package/{dist-esm/src/generated/service → dist/esm/generated/data}/index.js +3 -3
- package/dist/esm/generated/data/index.js.map +1 -0
- package/dist/esm/generated/data/models/index.d.ts +783 -0
- package/dist/esm/generated/data/models/index.d.ts.map +1 -0
- package/dist/esm/generated/data/models/index.js +80 -0
- package/dist/esm/generated/data/models/index.js.map +1 -0
- package/dist/esm/generated/data/models/mappers.d.ts +34 -0
- package/dist/esm/generated/data/models/mappers.d.ts.map +1 -0
- package/dist/esm/generated/data/models/mappers.js +1088 -0
- package/dist/esm/generated/data/models/mappers.js.map +1 -0
- package/dist/esm/generated/data/models/parameters.d.ts +58 -0
- package/dist/esm/generated/data/models/parameters.d.ts.map +1 -0
- package/dist/esm/generated/data/models/parameters.js +577 -0
- package/dist/esm/generated/data/models/parameters.js.map +1 -0
- package/dist/esm/generated/data/operations/documents.d.ts +70 -0
- package/dist/esm/generated/data/operations/documents.d.ts.map +1 -0
- package/dist/esm/generated/data/operations/documents.js +315 -0
- package/dist/esm/generated/data/operations/documents.js.map +1 -0
- package/dist/esm/generated/data/operations/index.d.ts +2 -0
- package/dist/esm/generated/data/operations/index.d.ts.map +1 -0
- package/dist/esm/generated/data/operations/index.js +9 -0
- package/dist/esm/generated/data/operations/index.js.map +1 -0
- package/dist/esm/generated/data/operationsInterfaces/documents.d.ts +62 -0
- package/dist/esm/generated/data/operationsInterfaces/documents.d.ts.map +1 -0
- package/dist/esm/generated/data/operationsInterfaces/documents.js +9 -0
- package/dist/esm/generated/data/operationsInterfaces/documents.js.map +1 -0
- package/dist/esm/generated/data/operationsInterfaces/index.d.ts +2 -0
- package/dist/esm/generated/data/operationsInterfaces/index.d.ts.map +1 -0
- package/dist/esm/generated/data/operationsInterfaces/index.js +9 -0
- package/dist/esm/generated/data/operationsInterfaces/index.js.map +1 -0
- package/dist/esm/generated/data/searchClient.d.ts +21 -0
- package/dist/esm/generated/data/searchClient.d.ts.map +1 -0
- package/dist/esm/generated/data/searchClient.js +88 -0
- package/dist/esm/generated/data/searchClient.js.map +1 -0
- package/dist/esm/generated/service/index.d.ts +4 -0
- package/dist/esm/generated/service/index.d.ts.map +1 -0
- package/dist/esm/generated/service/index.js +11 -0
- package/dist/esm/generated/service/index.js.map +1 -0
- package/dist/esm/generated/service/models/index.d.ts +4428 -0
- package/dist/esm/generated/service/models/index.d.ts.map +1 -0
- package/dist/esm/generated/service/models/index.js +1456 -0
- package/dist/esm/generated/service/models/index.js.map +1 -0
- package/dist/esm/generated/service/models/mappers.d.ts +272 -0
- package/dist/esm/generated/service/models/mappers.d.ts.map +1 -0
- package/dist/esm/generated/service/models/mappers.js +6071 -0
- package/dist/esm/generated/service/models/mappers.js.map +1 -0
- package/dist/esm/generated/service/models/parameters.d.ts +22 -0
- package/dist/esm/generated/service/models/parameters.d.ts.map +1 -0
- package/dist/esm/generated/service/models/parameters.js +173 -0
- package/dist/esm/generated/service/models/parameters.js.map +1 -0
- package/dist/esm/generated/service/operations/aliases.d.ts +2 -0
- package/dist/esm/generated/service/operations/aliases.d.ts.map +1 -0
- package/dist/esm/generated/service/operations/aliases.js +201 -0
- package/dist/esm/generated/service/operations/aliases.js.map +1 -0
- package/dist/esm/generated/service/operations/dataSources.d.ts +43 -0
- package/dist/esm/generated/service/operations/dataSources.d.ts.map +1 -0
- package/dist/esm/generated/service/operations/dataSources.js +160 -0
- package/dist/esm/generated/service/operations/dataSources.js.map +1 -0
- package/dist/esm/generated/service/operations/index.d.ts +6 -0
- package/dist/esm/generated/service/operations/index.d.ts.map +1 -0
- package/dist/esm/generated/service/operations/index.js +13 -0
- package/dist/esm/generated/service/operations/index.js.map +1 -0
- package/dist/esm/generated/service/operations/indexers.d.ts +61 -0
- package/dist/esm/generated/service/operations/indexers.d.ts.map +1 -0
- package/dist/esm/generated/service/operations/indexers.js +228 -0
- package/dist/esm/generated/service/operations/indexers.js.map +1 -0
- package/dist/esm/generated/service/operations/indexes.d.ts +58 -0
- package/dist/esm/generated/service/operations/indexes.d.ts.map +1 -0
- package/dist/esm/generated/service/operations/indexes.js +213 -0
- package/dist/esm/generated/service/operations/indexes.js.map +1 -0
- package/dist/esm/generated/service/operations/skillsets.d.ts +43 -0
- package/dist/esm/generated/service/operations/skillsets.d.ts.map +1 -0
- package/{dist-esm/src/generated/service/operations/aliases.js → dist/esm/generated/service/operations/skillsets.js} +71 -71
- package/dist/esm/generated/service/operations/skillsets.js.map +1 -0
- package/dist/esm/generated/service/operations/synonymMaps.d.ts +43 -0
- package/dist/esm/generated/service/operations/synonymMaps.d.ts.map +1 -0
- package/dist/esm/generated/service/operations/synonymMaps.js +160 -0
- package/dist/esm/generated/service/operations/synonymMaps.js.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/aliases.d.ts +2 -0
- package/dist/esm/generated/service/operationsInterfaces/aliases.d.ts.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/aliases.js +68 -0
- package/dist/esm/generated/service/operationsInterfaces/aliases.js.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/dataSources.d.ts +35 -0
- package/dist/esm/generated/service/operationsInterfaces/dataSources.d.ts.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/dataSources.js +9 -0
- package/dist/esm/generated/service/operationsInterfaces/dataSources.js.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/index.d.ts +6 -0
- package/dist/esm/generated/service/operationsInterfaces/index.d.ts.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/index.js +13 -0
- package/dist/esm/generated/service/operationsInterfaces/index.js.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/indexers.d.ts +53 -0
- package/dist/esm/generated/service/operationsInterfaces/indexers.d.ts.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/indexers.js +9 -0
- package/dist/esm/generated/service/operationsInterfaces/indexers.js.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/indexes.d.ts +50 -0
- package/dist/esm/generated/service/operationsInterfaces/indexes.d.ts.map +1 -0
- package/{dist-esm/src/generated/service/operationsInterfaces/aliases.js → dist/esm/generated/service/operationsInterfaces/indexes.js} +1 -1
- package/dist/esm/generated/service/operationsInterfaces/indexes.js.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/skillsets.d.ts +35 -0
- package/dist/esm/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/skillsets.js +9 -0
- package/dist/esm/generated/service/operationsInterfaces/skillsets.js.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/synonymMaps.d.ts +35 -0
- package/dist/esm/generated/service/operationsInterfaces/synonymMaps.d.ts.map +1 -0
- package/dist/esm/generated/service/operationsInterfaces/synonymMaps.js +9 -0
- package/dist/esm/generated/service/operationsInterfaces/synonymMaps.js.map +1 -0
- package/dist/esm/generated/service/searchServiceClient.d.ts +28 -0
- package/dist/esm/generated/service/searchServiceClient.d.ts.map +1 -0
- package/dist/esm/generated/service/searchServiceClient.js +116 -0
- package/dist/esm/generated/service/searchServiceClient.js.map +1 -0
- package/dist/esm/geographyPoint.d.ts +27 -0
- package/dist/esm/geographyPoint.d.ts.map +1 -0
- package/dist/esm/geographyPoint.js +36 -0
- package/dist/esm/geographyPoint.js.map +1 -0
- package/dist/esm/index.d.ts +15 -0
- package/dist/esm/index.d.ts.map +1 -0
- package/dist/esm/index.js +16 -0
- package/dist/esm/index.js.map +1 -0
- package/dist/esm/indexDocumentsBatch.d.ts +41 -0
- package/dist/esm/indexDocumentsBatch.d.ts.map +1 -0
- package/dist/esm/indexDocumentsBatch.js +79 -0
- package/dist/esm/indexDocumentsBatch.js.map +1 -0
- package/dist/esm/indexModels.d.ts +722 -0
- package/dist/esm/indexModels.d.ts.map +1 -0
- package/dist/esm/indexModels.js +4 -0
- package/dist/esm/indexModels.js.map +1 -0
- package/dist/esm/logger.d.ts +5 -0
- package/dist/esm/logger.d.ts.map +1 -0
- package/dist/esm/logger.js +8 -0
- package/dist/esm/logger.js.map +1 -0
- package/dist/esm/odata.d.ts +14 -0
- package/dist/esm/odata.d.ts.map +1 -0
- package/dist/esm/odata.js +47 -0
- package/dist/esm/odata.js.map +1 -0
- package/dist/esm/odataMetadataPolicy.d.ts +9 -0
- package/dist/esm/odataMetadataPolicy.d.ts.map +1 -0
- package/dist/esm/odataMetadataPolicy.js +18 -0
- package/dist/esm/odataMetadataPolicy.js.map +1 -0
- package/dist/esm/package.json +3 -0
- package/dist/esm/searchApiKeyCredentialPolicy.d.ts +8 -0
- package/dist/esm/searchApiKeyCredentialPolicy.d.ts.map +1 -0
- package/dist/esm/searchApiKeyCredentialPolicy.js +20 -0
- package/dist/esm/searchApiKeyCredentialPolicy.js.map +1 -0
- package/dist/esm/searchAudience.d.ts +18 -0
- package/dist/esm/searchAudience.d.ts.map +1 -0
- package/dist/esm/searchAudience.js +21 -0
- package/dist/esm/searchAudience.js.map +1 -0
- package/dist/esm/searchClient.d.ts +266 -0
- package/dist/esm/searchClient.d.ts.map +1 -0
- package/dist/esm/searchClient.js +664 -0
- package/dist/esm/searchClient.js.map +1 -0
- package/dist/esm/searchIndexClient.d.ts +177 -0
- package/dist/esm/searchIndexClient.d.ts.map +1 -0
- package/dist/esm/searchIndexClient.js +509 -0
- package/dist/esm/searchIndexClient.js.map +1 -0
- package/dist/esm/searchIndexerClient.d.ts +187 -0
- package/dist/esm/searchIndexerClient.d.ts.map +1 -0
- package/dist/esm/searchIndexerClient.js +586 -0
- package/dist/esm/searchIndexerClient.js.map +1 -0
- package/dist/esm/searchIndexingBufferedSender.d.ts +193 -0
- package/dist/esm/searchIndexingBufferedSender.d.ts.map +1 -0
- package/dist/esm/searchIndexingBufferedSender.js +322 -0
- package/dist/esm/searchIndexingBufferedSender.js.map +1 -0
- package/dist/esm/serialization.d.ts +3 -0
- package/dist/esm/serialization.d.ts.map +1 -0
- package/dist/esm/serialization.js +144 -0
- package/dist/esm/serialization.js.map +1 -0
- package/dist/esm/serviceModels.d.ts +2181 -0
- package/dist/esm/serviceModels.d.ts.map +1 -0
- package/dist/esm/serviceModels.js +654 -0
- package/dist/esm/serviceModels.js.map +1 -0
- package/dist/esm/serviceUtils.d.ts +35 -0
- package/dist/esm/serviceUtils.d.ts.map +1 -0
- package/dist/esm/serviceUtils.js +518 -0
- package/dist/esm/serviceUtils.js.map +1 -0
- package/dist/esm/synonymMapHelper.d.ts +10 -0
- package/dist/esm/synonymMapHelper.d.ts.map +1 -0
- package/{dist-esm/src → dist/esm}/synonymMapHelper.js.map +1 -1
- package/dist/esm/tracing.d.ts +7 -0
- package/dist/esm/tracing.d.ts.map +1 -0
- package/dist/esm/tracing.js +13 -0
- package/dist/esm/tracing.js.map +1 -0
- package/dist/esm/walk.d.ts +2 -0
- package/dist/esm/walk.d.ts.map +1 -0
- package/dist/esm/walk.js +76 -0
- package/dist/esm/walk.js.map +1 -0
- package/dist/react-native/base64.d.ts +11 -0
- package/dist/react-native/base64.d.ts.map +1 -0
- package/dist/react-native/base64.js +17 -0
- package/dist/react-native/base64.js.map +1 -0
- package/dist/react-native/errorModels.d.ts +50 -0
- package/dist/react-native/errorModels.d.ts.map +1 -0
- package/dist/react-native/errorModels.js +4 -0
- package/dist/react-native/errorModels.js.map +1 -0
- package/dist/react-native/generated/data/index.d.ts +4 -0
- package/dist/react-native/generated/data/index.d.ts.map +1 -0
- package/dist/react-native/generated/data/index.js +11 -0
- package/dist/react-native/generated/data/index.js.map +1 -0
- package/dist/react-native/generated/data/models/index.d.ts +783 -0
- package/dist/react-native/generated/data/models/index.d.ts.map +1 -0
- package/dist/react-native/generated/data/models/index.js +80 -0
- package/dist/react-native/generated/data/models/index.js.map +1 -0
- package/dist/react-native/generated/data/models/mappers.d.ts +34 -0
- package/dist/react-native/generated/data/models/mappers.d.ts.map +1 -0
- package/dist/react-native/generated/data/models/mappers.js +1088 -0
- package/dist/react-native/generated/data/models/mappers.js.map +1 -0
- package/dist/react-native/generated/data/models/parameters.d.ts +58 -0
- package/dist/react-native/generated/data/models/parameters.d.ts.map +1 -0
- package/dist/react-native/generated/data/models/parameters.js +577 -0
- package/dist/react-native/generated/data/models/parameters.js.map +1 -0
- package/dist/react-native/generated/data/operations/documents.d.ts +70 -0
- package/dist/react-native/generated/data/operations/documents.d.ts.map +1 -0
- package/dist/react-native/generated/data/operations/documents.js +315 -0
- package/dist/react-native/generated/data/operations/documents.js.map +1 -0
- package/dist/react-native/generated/data/operations/index.d.ts +2 -0
- package/dist/react-native/generated/data/operations/index.d.ts.map +1 -0
- package/dist/react-native/generated/data/operations/index.js +9 -0
- package/dist/react-native/generated/data/operations/index.js.map +1 -0
- package/dist/react-native/generated/data/operationsInterfaces/documents.d.ts +62 -0
- package/dist/react-native/generated/data/operationsInterfaces/documents.d.ts.map +1 -0
- package/dist/react-native/generated/data/operationsInterfaces/documents.js +9 -0
- package/dist/react-native/generated/data/operationsInterfaces/documents.js.map +1 -0
- package/dist/react-native/generated/data/operationsInterfaces/index.d.ts +2 -0
- package/dist/react-native/generated/data/operationsInterfaces/index.d.ts.map +1 -0
- package/dist/react-native/generated/data/operationsInterfaces/index.js +9 -0
- package/dist/react-native/generated/data/operationsInterfaces/index.js.map +1 -0
- package/dist/react-native/generated/data/searchClient.d.ts +21 -0
- package/dist/react-native/generated/data/searchClient.d.ts.map +1 -0
- package/dist/react-native/generated/data/searchClient.js +88 -0
- package/dist/react-native/generated/data/searchClient.js.map +1 -0
- package/dist/react-native/generated/service/index.d.ts +4 -0
- package/dist/react-native/generated/service/index.d.ts.map +1 -0
- package/dist/react-native/generated/service/index.js +11 -0
- package/dist/react-native/generated/service/index.js.map +1 -0
- package/dist/react-native/generated/service/models/index.d.ts +4428 -0
- package/dist/react-native/generated/service/models/index.d.ts.map +1 -0
- package/dist/react-native/generated/service/models/index.js +1456 -0
- package/dist/react-native/generated/service/models/index.js.map +1 -0
- package/dist/react-native/generated/service/models/mappers.d.ts +272 -0
- package/dist/react-native/generated/service/models/mappers.d.ts.map +1 -0
- package/dist/react-native/generated/service/models/mappers.js +6071 -0
- package/dist/react-native/generated/service/models/mappers.js.map +1 -0
- package/dist/react-native/generated/service/models/parameters.d.ts +22 -0
- package/dist/react-native/generated/service/models/parameters.d.ts.map +1 -0
- package/dist/react-native/generated/service/models/parameters.js +173 -0
- package/dist/react-native/generated/service/models/parameters.js.map +1 -0
- package/dist/react-native/generated/service/operations/aliases.d.ts +2 -0
- package/dist/react-native/generated/service/operations/aliases.d.ts.map +1 -0
- package/dist/react-native/generated/service/operations/aliases.js +201 -0
- package/dist/react-native/generated/service/operations/aliases.js.map +1 -0
- package/dist/react-native/generated/service/operations/dataSources.d.ts +43 -0
- package/dist/react-native/generated/service/operations/dataSources.d.ts.map +1 -0
- package/dist/react-native/generated/service/operations/dataSources.js +160 -0
- package/dist/react-native/generated/service/operations/dataSources.js.map +1 -0
- package/dist/react-native/generated/service/operations/index.d.ts +6 -0
- package/dist/react-native/generated/service/operations/index.d.ts.map +1 -0
- package/dist/react-native/generated/service/operations/index.js +13 -0
- package/dist/react-native/generated/service/operations/index.js.map +1 -0
- package/dist/react-native/generated/service/operations/indexers.d.ts +61 -0
- package/dist/react-native/generated/service/operations/indexers.d.ts.map +1 -0
- package/dist/react-native/generated/service/operations/indexers.js +228 -0
- package/dist/react-native/generated/service/operations/indexers.js.map +1 -0
- package/dist/react-native/generated/service/operations/indexes.d.ts +58 -0
- package/dist/react-native/generated/service/operations/indexes.d.ts.map +1 -0
- package/dist/react-native/generated/service/operations/indexes.js +213 -0
- package/dist/react-native/generated/service/operations/indexes.js.map +1 -0
- package/dist/react-native/generated/service/operations/skillsets.d.ts +43 -0
- package/dist/react-native/generated/service/operations/skillsets.d.ts.map +1 -0
- package/dist/react-native/generated/service/operations/skillsets.js +160 -0
- package/dist/react-native/generated/service/operations/skillsets.js.map +1 -0
- package/dist/react-native/generated/service/operations/synonymMaps.d.ts +43 -0
- package/dist/react-native/generated/service/operations/synonymMaps.d.ts.map +1 -0
- package/dist/react-native/generated/service/operations/synonymMaps.js +160 -0
- package/dist/react-native/generated/service/operations/synonymMaps.js.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/aliases.d.ts +2 -0
- package/dist/react-native/generated/service/operationsInterfaces/aliases.d.ts.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/aliases.js +68 -0
- package/dist/react-native/generated/service/operationsInterfaces/aliases.js.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/dataSources.d.ts +35 -0
- package/dist/react-native/generated/service/operationsInterfaces/dataSources.d.ts.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/dataSources.js +9 -0
- package/dist/react-native/generated/service/operationsInterfaces/dataSources.js.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/index.d.ts +6 -0
- package/dist/react-native/generated/service/operationsInterfaces/index.d.ts.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/index.js +13 -0
- package/dist/react-native/generated/service/operationsInterfaces/index.js.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexers.d.ts +53 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexers.d.ts.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexers.js +9 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexers.js.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexes.d.ts +50 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexes.d.ts.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexes.js +9 -0
- package/dist/react-native/generated/service/operationsInterfaces/indexes.js.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/skillsets.d.ts +35 -0
- package/dist/react-native/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/skillsets.js +9 -0
- package/dist/react-native/generated/service/operationsInterfaces/skillsets.js.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/synonymMaps.d.ts +35 -0
- package/dist/react-native/generated/service/operationsInterfaces/synonymMaps.d.ts.map +1 -0
- package/dist/react-native/generated/service/operationsInterfaces/synonymMaps.js +9 -0
- package/dist/react-native/generated/service/operationsInterfaces/synonymMaps.js.map +1 -0
- package/dist/react-native/generated/service/searchServiceClient.d.ts +28 -0
- package/dist/react-native/generated/service/searchServiceClient.d.ts.map +1 -0
- package/dist/react-native/generated/service/searchServiceClient.js +116 -0
- package/dist/react-native/generated/service/searchServiceClient.js.map +1 -0
- package/dist/react-native/geographyPoint.d.ts +27 -0
- package/dist/react-native/geographyPoint.d.ts.map +1 -0
- package/dist/react-native/geographyPoint.js +36 -0
- package/dist/react-native/geographyPoint.js.map +1 -0
- package/dist/react-native/index.d.ts +15 -0
- package/dist/react-native/index.d.ts.map +1 -0
- package/dist/react-native/index.js +16 -0
- package/dist/react-native/index.js.map +1 -0
- package/dist/react-native/indexDocumentsBatch.d.ts +41 -0
- package/dist/react-native/indexDocumentsBatch.d.ts.map +1 -0
- package/dist/react-native/indexDocumentsBatch.js +79 -0
- package/dist/react-native/indexDocumentsBatch.js.map +1 -0
- package/dist/react-native/indexModels.d.ts +722 -0
- package/dist/react-native/indexModels.d.ts.map +1 -0
- package/dist/react-native/indexModels.js +4 -0
- package/dist/react-native/indexModels.js.map +1 -0
- package/dist/react-native/logger.d.ts +5 -0
- package/dist/react-native/logger.d.ts.map +1 -0
- package/dist/react-native/logger.js +8 -0
- package/dist/react-native/logger.js.map +1 -0
- package/dist/react-native/odata.d.ts +14 -0
- package/dist/react-native/odata.d.ts.map +1 -0
- package/dist/react-native/odata.js +47 -0
- package/dist/react-native/odata.js.map +1 -0
- package/dist/react-native/odataMetadataPolicy.d.ts +9 -0
- package/dist/react-native/odataMetadataPolicy.d.ts.map +1 -0
- package/dist/react-native/odataMetadataPolicy.js +18 -0
- package/dist/react-native/odataMetadataPolicy.js.map +1 -0
- package/dist/react-native/package.json +3 -0
- package/dist/react-native/searchApiKeyCredentialPolicy.d.ts +8 -0
- package/dist/react-native/searchApiKeyCredentialPolicy.d.ts.map +1 -0
- package/dist/react-native/searchApiKeyCredentialPolicy.js +20 -0
- package/dist/react-native/searchApiKeyCredentialPolicy.js.map +1 -0
- package/dist/react-native/searchAudience.d.ts +18 -0
- package/dist/react-native/searchAudience.d.ts.map +1 -0
- package/dist/react-native/searchAudience.js +21 -0
- package/dist/react-native/searchAudience.js.map +1 -0
- package/dist/react-native/searchClient.d.ts +266 -0
- package/dist/react-native/searchClient.d.ts.map +1 -0
- package/dist/react-native/searchClient.js +664 -0
- package/dist/react-native/searchClient.js.map +1 -0
- package/dist/react-native/searchIndexClient.d.ts +177 -0
- package/dist/react-native/searchIndexClient.d.ts.map +1 -0
- package/dist/react-native/searchIndexClient.js +509 -0
- package/dist/react-native/searchIndexClient.js.map +1 -0
- package/dist/react-native/searchIndexerClient.d.ts +187 -0
- package/dist/react-native/searchIndexerClient.d.ts.map +1 -0
- package/dist/react-native/searchIndexerClient.js +586 -0
- package/dist/react-native/searchIndexerClient.js.map +1 -0
- package/dist/react-native/searchIndexingBufferedSender.d.ts +193 -0
- package/dist/react-native/searchIndexingBufferedSender.d.ts.map +1 -0
- package/dist/react-native/searchIndexingBufferedSender.js +322 -0
- package/dist/react-native/searchIndexingBufferedSender.js.map +1 -0
- package/dist/react-native/serialization.d.ts +3 -0
- package/dist/react-native/serialization.d.ts.map +1 -0
- package/dist/react-native/serialization.js +144 -0
- package/dist/react-native/serialization.js.map +1 -0
- package/dist/react-native/serviceModels.d.ts +2181 -0
- package/dist/react-native/serviceModels.d.ts.map +1 -0
- package/dist/react-native/serviceModels.js +654 -0
- package/dist/react-native/serviceModels.js.map +1 -0
- package/dist/react-native/serviceUtils.d.ts +35 -0
- package/dist/react-native/serviceUtils.d.ts.map +1 -0
- package/dist/react-native/serviceUtils.js +518 -0
- package/dist/react-native/serviceUtils.js.map +1 -0
- package/dist/react-native/synonymMapHelper.d.ts +10 -0
- package/dist/react-native/synonymMapHelper.d.ts.map +1 -0
- package/dist/react-native/synonymMapHelper.js +24 -0
- package/dist/react-native/synonymMapHelper.js.map +1 -0
- package/dist/react-native/tracing.d.ts +7 -0
- package/dist/react-native/tracing.d.ts.map +1 -0
- package/dist/react-native/tracing.js +13 -0
- package/dist/react-native/tracing.js.map +1 -0
- package/dist/react-native/walk.d.ts +2 -0
- package/dist/react-native/walk.d.ts.map +1 -0
- package/dist/react-native/walk.js +76 -0
- package/dist/react-native/walk.js.map +1 -0
- package/package.json +88 -81
- package/dist/index.js +0 -15948
- package/dist/index.js.map +0 -1
- package/dist-esm/src/base64.browser.js.map +0 -1
- package/dist-esm/src/errorModels.js.map +0 -1
- package/dist-esm/src/generated/data/index.js.map +0 -1
- package/dist-esm/src/generated/data/models/index.js +0 -286
- package/dist-esm/src/generated/data/models/index.js.map +0 -1
- package/dist-esm/src/generated/data/models/mappers.js.map +0 -1
- package/dist-esm/src/generated/data/models/parameters.js.map +0 -1
- package/dist-esm/src/generated/data/operations/documents.js.map +0 -1
- package/dist-esm/src/generated/data/operations/index.js.map +0 -1
- package/dist-esm/src/generated/data/operationsInterfaces/documents.js.map +0 -1
- package/dist-esm/src/generated/data/operationsInterfaces/index.js.map +0 -1
- package/dist-esm/src/generated/data/searchClient.js.map +0 -1
- package/dist-esm/src/generated/service/index.js.map +0 -1
- package/dist-esm/src/generated/service/models/index.js.map +0 -1
- package/dist-esm/src/generated/service/models/mappers.js.map +0 -1
- package/dist-esm/src/generated/service/models/parameters.js.map +0 -1
- package/dist-esm/src/generated/service/operations/aliases.js.map +0 -1
- package/dist-esm/src/generated/service/operations/dataSources.js.map +0 -1
- package/dist-esm/src/generated/service/operations/index.js +0 -14
- package/dist-esm/src/generated/service/operations/index.js.map +0 -1
- package/dist-esm/src/generated/service/operations/indexers.js.map +0 -1
- package/dist-esm/src/generated/service/operations/indexes.js.map +0 -1
- package/dist-esm/src/generated/service/operations/skillsets.js.map +0 -1
- package/dist-esm/src/generated/service/operations/synonymMaps.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/dataSources.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/index.js +0 -14
- package/dist-esm/src/generated/service/operationsInterfaces/index.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/indexes.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +0 -1
- package/dist-esm/src/generated/service/operationsInterfaces/synonymMaps.js.map +0 -1
- package/dist-esm/src/generated/service/searchServiceClient.js.map +0 -1
- package/dist-esm/src/geographyPoint.js.map +0 -1
- package/dist-esm/src/index.js +0 -16
- package/dist-esm/src/index.js.map +0 -1
- package/dist-esm/src/indexDocumentsBatch.js.map +0 -1
- package/dist-esm/src/indexModels.js.map +0 -1
- package/dist-esm/src/odataMetadataPolicy.js.map +0 -1
- package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +0 -1
- package/dist-esm/src/searchClient.js.map +0 -1
- package/dist-esm/src/searchIndexClient.js.map +0 -1
- package/dist-esm/src/searchIndexerClient.js.map +0 -1
- package/dist-esm/src/searchIndexingBufferedSender.js.map +0 -1
- package/dist-esm/src/serialization.js.map +0 -1
- package/dist-esm/src/serviceModels.js.map +0 -1
- package/dist-esm/src/serviceUtils.js.map +0 -1
- package/dist-esm/src/synonymMapHelper.browser.js.map +0 -1
- package/dist-esm/src/walk.js.map +0 -1
- package/types/search-documents.d.ts +0 -8041
- /package/{dist-esm/src → dist/browser}/errorModels.js +0 -0
- /package/{dist-esm/src → dist/browser}/generated/data/operationsInterfaces/documents.js +0 -0
- /package/{dist-esm/src → dist/browser}/generated/service/operationsInterfaces/dataSources.js +0 -0
- /package/{dist-esm/src → dist/browser}/generated/service/operationsInterfaces/indexers.js +0 -0
- /package/{dist-esm/src → dist/browser}/generated/service/operationsInterfaces/indexes.js +0 -0
- /package/{dist-esm/src → dist/browser}/generated/service/operationsInterfaces/skillsets.js +0 -0
- /package/{dist-esm/src → dist/browser}/generated/service/operationsInterfaces/synonymMaps.js +0 -0
- /package/{dist-esm/src → dist/browser}/indexModels.js +0 -0
- /package/{dist-esm/src → dist/browser}/logger.js +0 -0
- /package/{dist-esm/src → dist/browser}/logger.js.map +0 -0
- /package/{dist-esm/src → dist/browser}/odata.js +0 -0
- /package/{dist-esm/src → dist/browser}/odata.js.map +0 -0
- /package/{dist-esm/src → dist/browser}/odataMetadataPolicy.js +0 -0
- /package/{dist-esm/src → dist/browser}/searchApiKeyCredentialPolicy.js +0 -0
- /package/{dist-esm/src → dist/browser}/searchAudience.js +0 -0
- /package/{dist-esm/src → dist/browser}/searchAudience.js.map +0 -0
- /package/{dist-esm/src → dist/browser}/serviceModels.js +0 -0
- /package/{dist-esm/src → dist/browser}/tracing.js +0 -0
- /package/{dist-esm/src → dist/browser}/tracing.js.map +0 -0
- /package/{dist-esm/src → dist/esm}/base64.js +0 -0
- /package/{dist-esm/src → dist/esm}/base64.js.map +0 -0
- /package/{dist-esm/src → dist/esm}/synonymMapHelper.js +0 -0
@@ -0,0 +1,4428 @@
+import * as coreClient from "@azure/core-client";
+import * as coreHttpCompat from "@azure/core-http-compat";
+export type DataChangeDetectionPolicyUnion = DataChangeDetectionPolicy | HighWaterMarkChangeDetectionPolicy | SqlIntegratedChangeTrackingPolicy;
+export type DataDeletionDetectionPolicyUnion = DataDeletionDetectionPolicy | SoftDeleteColumnDeletionDetectionPolicy;
+export type SearchIndexerSkillUnion = SearchIndexerSkill | ConditionalSkill | KeyPhraseExtractionSkill | OcrSkill | ImageAnalysisSkill | LanguageDetectionSkill | ShaperSkill | MergeSkill | EntityRecognitionSkill | SentimentSkill | SentimentSkillV3 | EntityLinkingSkill | EntityRecognitionSkillV3 | PIIDetectionSkill | SplitSkill | CustomEntityLookupSkill | TextTranslationSkill | DocumentExtractionSkill | DocumentIntelligenceLayoutSkill | WebApiSkill | AzureOpenAIEmbeddingSkill;
+export type CognitiveServicesAccountUnion = CognitiveServicesAccount | DefaultCognitiveServicesAccount | CognitiveServicesAccountKey;
+export type ScoringFunctionUnion = ScoringFunction | DistanceScoringFunction | FreshnessScoringFunction | MagnitudeScoringFunction | TagScoringFunction;
+export type LexicalAnalyzerUnion = LexicalAnalyzer | CustomAnalyzer | PatternAnalyzer | LuceneStandardAnalyzer | StopAnalyzer;
+export type LexicalTokenizerUnion = LexicalTokenizer | ClassicTokenizer | EdgeNGramTokenizer | KeywordTokenizer | KeywordTokenizerV2 | MicrosoftLanguageTokenizer | MicrosoftLanguageStemmingTokenizer | NGramTokenizer | PathHierarchyTokenizerV2 | PatternTokenizer | LuceneStandardTokenizer | LuceneStandardTokenizerV2 | UaxUrlEmailTokenizer;
+export type TokenFilterUnion = TokenFilter | AsciiFoldingTokenFilter | CjkBigramTokenFilter | CommonGramTokenFilter | DictionaryDecompounderTokenFilter | EdgeNGramTokenFilter | EdgeNGramTokenFilterV2 | ElisionTokenFilter | KeepTokenFilter | KeywordMarkerTokenFilter | LengthTokenFilter | LimitTokenFilter | NGramTokenFilter | NGramTokenFilterV2 | PatternCaptureTokenFilter | PatternReplaceTokenFilter | PhoneticTokenFilter | ShingleTokenFilter | SnowballTokenFilter | StemmerTokenFilter | StemmerOverrideTokenFilter | StopwordsTokenFilter | SynonymTokenFilter | TruncateTokenFilter | UniqueTokenFilter | WordDelimiterTokenFilter;
+export type CharFilterUnion = CharFilter | MappingCharFilter | PatternReplaceCharFilter;
+export type BaseLexicalNormalizerUnion = BaseLexicalNormalizer | CustomLexicalNormalizer;
+export type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity;
+export type VectorSearchAlgorithmConfigurationUnion = VectorSearchAlgorithmConfiguration | HnswAlgorithmConfiguration | ExhaustiveKnnAlgorithmConfiguration;
+export type VectorSearchVectorizerUnion = VectorSearchVectorizer | AzureOpenAIVectorizer | WebApiVectorizer;
+export type VectorSearchCompressionUnion = VectorSearchCompression | ScalarQuantizationCompression | BinaryQuantizationCompression;
+export type SearchIndexerDataIdentityUnion = SearchIndexerDataIdentity | SearchIndexerDataNoneIdentity | SearchIndexerDataUserAssignedIdentity;
+/** Represents a datasource definition, which can be used to configure an indexer. */
+export interface SearchIndexerDataSource {
+  /** The name of the datasource. */
+  name: string;
+  /** The description of the datasource. */
+  description?: string;
+  /** The type of the datasource. */
+  type: SearchIndexerDataSourceType;
+  /** Credentials for the datasource. */
+  credentials: DataSourceCredentials;
+  /** The data container for the datasource. */
+  container: SearchIndexerDataContainer;
+  /** The data change detection policy for the datasource. */
+  dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion;
+  /** The data deletion detection policy for the datasource. */
+  dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion;
+  /** The ETag of the data source. */
+  etag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition. Once you have encrypted your data source definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+}
+/** Represents credentials that can be used to connect to a datasource. */
+export interface DataSourceCredentials {
+  /** The connection string for the datasource. For Azure SQL, Azure Blob, ADLS Gen 2 and Azure Table, this would be the connection string, or the resource ID if using managed identity. For CosmosDB, this would be a formatted connection string specifying ApiKind, or the resource ID for managed identity. For OneLake files, the connection string would be either the workspace GUID or the workspace FQDN; OneLake only supports managed identity connections. Set to `<unchanged>` (with brackets) if you don't want the connection string updated. Set to `<redacted>` if you want to remove the connection string value from the datasource. */
+  connectionString?: string;
+}
+/** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */
+export interface SearchIndexerDataContainer {
+  /** The name of the table or view (for Azure SQL datasource), collection (for CosmosDB datasource), container (for Azure Blob and ADLS Gen 2 datasources), Azure Table (for Azure Table datasource), or lakehouse (for OneLake datasource) that will be indexed. */
+  name: string;
+  /** A query that is applied to this data container. For a CosmosDB datasource, the query can flatten and filter data. For Azure Blob and ADLS Gen 2, the query can filter by folders. For Azure Table, the query can filter by row data. For OneLake, the query can filter by folder or shortcut. Not supported by Azure SQL datasources. */
+  query?: string;
+}
+/** Base type for data change detection policies. */
+export interface DataChangeDetectionPolicy {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" | "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy";
+}
+/** Base type for data deletion detection policies. */
+export interface DataDeletionDetectionPolicy {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
+}
+/** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. */
+export interface SearchResourceEncryptionKey {
+  /** The name of your Azure Key Vault key to be used to encrypt your data at rest. */
+  keyName: string;
+  /** The version of your Azure Key Vault key to be used to encrypt your data at rest. */
+  keyVersion: string;
+  /** The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be `https://my-keyvault-name.vault.azure.net`. */
+  vaultUri: string;
+  /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. */
+  accessCredentials?: AzureActiveDirectoryApplicationCredentials;
+}
+/** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. */
+export interface AzureActiveDirectoryApplicationCredentials {
+  /** An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. */
+  applicationId: string;
+  /** The authentication key of the specified AAD application. */
+  applicationSecret?: string;
+}
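
Read together, the datasource model above composes like this. The following is a minimal sketch, assuming the interfaces in this diff are in scope; the names, connection string, and datasource type value are all placeholders, and the concrete high-water-mark policy type adds a column-name property not shown in this excerpt:

```ts
// Hypothetical Azure SQL datasource built from the shapes declared above.
const dataSource: SearchIndexerDataSource = {
  name: "hotels-ds", // placeholder name
  type: "azuresql" as SearchIndexerDataSourceType, // assumed enum value
  credentials: { connectionString: "<connection-string>" }, // placeholder
  container: { name: "Hotels" }, // table or view to index
  // Only the base policy shape is declared in this excerpt; the concrete
  // HighWaterMarkChangeDetectionPolicy also names the tracking column.
  dataChangeDetectionPolicy: {
    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy",
  },
};
```
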
+/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.) */
+export interface ErrorResponse {
+  /** The error object. */
+  error?: ErrorDetail;
+}
+/** The error detail. */
+export interface ErrorDetail {
+  /**
+   * The error code.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly code?: string;
+  /**
+   * The error message.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly message?: string;
+  /**
+   * The error target.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly target?: string;
+  /**
+   * The error details.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly details?: ErrorDetail[];
+  /**
+   * The error additional info.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly additionalInfo?: ErrorAdditionalInfo[];
+}
+/** The resource management error additional info. */
+export interface ErrorAdditionalInfo {
+  /**
+   * The additional info type.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly type?: string;
+  /**
+   * The additional info.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly info?: Record<string, unknown>;
+}
+/** Response from a List Datasources request. If successful, it includes the full definitions of all datasources. */
+export interface ListDataSourcesResult {
+  /**
+   * The datasources in the Search service.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly dataSources: SearchIndexerDataSource[];
+}
+/** Represents an indexer. */
+export interface SearchIndexer {
+  /** The name of the indexer. */
+  name: string;
+  /** The description of the indexer. */
+  description?: string;
+  /** The name of the datasource from which this indexer reads data. */
+  dataSourceName: string;
+  /** The name of the skillset executing with this indexer. */
+  skillsetName?: string;
+  /** The name of the index to which this indexer writes data. */
+  targetIndexName: string;
+  /** The schedule for this indexer. */
+  schedule?: IndexingSchedule;
+  /** Parameters for indexer execution. */
+  parameters?: IndexingParameters;
+  /** Defines mappings between fields in the data source and corresponding target fields in the index. */
+  fieldMappings?: FieldMapping[];
+  /** Output field mappings are applied after enrichment and immediately before indexing. */
+  outputFieldMappings?: FieldMapping[];
+  /** A value indicating whether the indexer is disabled. Default is false. */
+  isDisabled?: boolean;
+  /** The ETag of the indexer. */
+  etag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+}
+/** Represents a schedule for indexer execution. */
+export interface IndexingSchedule {
+  /** The interval of time between indexer executions. */
+  interval: string;
+  /** The time when an indexer should start running. */
+  startTime?: Date;
+}
+/** Represents parameters for indexer execution. */
+export interface IndexingParameters {
+  /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */
+  batchSize?: number;
+  /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */
+  maxFailedItems?: number;
+  /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */
+  maxFailedItemsPerBatch?: number;
+  /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+  configuration?: IndexingParametersConfiguration;
+}
+/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+export interface IndexingParametersConfiguration {
+  /** Describes unknown properties. The value of an unknown property can be of "any" type. */
+  [property: string]: any;
+  /** Represents the parsing mode for indexing from an Azure blob data source. */
+  parsingMode?: BlobIndexerParsingMode;
+  /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over those files during indexing. */
+  excludedFileNameExtensions?: string;
+  /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files ".docx, .pptx, .msg" to specifically include those file types. */
+  indexedFileNameExtensions?: string;
+  /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */
+  failOnUnsupportedContentType?: boolean;
+  /** For Azure blobs, set to false if you want to continue indexing when a document fails indexing. */
+  failOnUnprocessableDocument?: boolean;
+  /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */
+  indexStorageMetadataOnlyForOversizedDocuments?: boolean;
+  /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */
+  delimitedTextHeaders?: string;
+  /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, "|"). */
+  delimitedTextDelimiter?: string;
+  /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */
+  firstLineContainsHeaders?: boolean;
+  /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */
+  documentRoot?: string;
+  /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when "imageAction" is set to a value other than "none". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */
+  dataToExtract?: BlobIndexerDataToExtract;
+  /** Determines how to process embedded images and image files in Azure blob storage. Setting the "imageAction" configuration to any value other than "none" requires that a skillset also be attached to that indexer. */
+  imageAction?: BlobIndexerImageAction;
+  /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */
+  allowSkillsetToReadFileData?: boolean;
+  /** Determines algorithm for text extraction from PDF files in Azure blob storage. */
+  pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;
+  /** Specifies the environment in which the indexer should execute. */
+  executionEnvironment?: IndexerExecutionEnvironment;
+  /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format "hh:mm:ss". */
+  queryTimeout?: string;
+}
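
The indexer, schedule, and parameter types above combine as follows. A minimal sketch with placeholder names; the interval is assumed to be an ISO-8601 duration such as "PT2H", and the parsing mode value is an assumed member of BlobIndexerParsingMode:

```ts
// Hypothetical indexer wiring the datasource above to a target index,
// running every two hours with JSON parsing and some failure tolerance.
const indexer: SearchIndexer = {
  name: "hotels-indexer",
  dataSourceName: "hotels-ds",
  targetIndexName: "hotels",
  schedule: { interval: "PT2H" }, // assumed ISO-8601 duration format
  parameters: {
    batchSize: 500,
    maxFailedItems: 10, // tolerate up to 10 item-level failures overall
    configuration: { parsingMode: "json" as BlobIndexerParsingMode },
  },
  isDisabled: false,
};
```
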
+/** Defines a mapping between a field in a data source and a target field in an index. */
+export interface FieldMapping {
+  /** The name of the field in the data source. */
+  sourceFieldName: string;
+  /** The name of the target field in the index. Same as the source field name by default. */
+  targetFieldName?: string;
+  /** A function to apply to each source field value before indexing. */
+  mappingFunction?: FieldMappingFunction;
+}
+/** Represents a function that transforms a value from a data source before indexing. */
+export interface FieldMappingFunction {
+  /** The name of the field mapping function. */
+  name: string;
+  /** A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. */
+  parameters?: {
+    [propertyName: string]: any;
+  };
+}
+/** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */
+export interface ListIndexersResult {
+  /**
+   * The indexers in the Search service.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly indexers: SearchIndexer[];
+}
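
A common use of FieldMappingFunction is making an arbitrary source value safe to use as a document key. A short sketch; the field names are placeholders, and base64Encode is one of the service's documented mapping functions:

```ts
// Hypothetical mapping that renames a source field and base64-encodes it,
// since blob paths contain characters that are invalid in document keys.
const keyMapping: FieldMapping = {
  sourceFieldName: "metadata_storage_path",
  targetFieldName: "id",
  mappingFunction: { name: "base64Encode" }, // parameters are optional here
};
```
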
+/** Represents the current status and execution history of an indexer. */
+export interface SearchIndexerStatus {
+  /**
+   * The name of the indexer.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly name: string;
+  /**
+   * Overall indexer status.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly status: IndexerStatus;
+  /**
+   * The result of the most recent or an in-progress indexer execution.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly lastResult?: IndexerExecutionResult;
+  /**
+   * History of the recent indexer executions, sorted in reverse chronological order.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly executionHistory: IndexerExecutionResult[];
+  /**
+   * The execution limits for the indexer.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly limits: SearchIndexerLimits;
+}
+/** Represents the result of an individual indexer execution. */
+export interface IndexerExecutionResult {
+  /**
+   * The outcome of this indexer execution.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly status: IndexerExecutionStatus;
+  /**
+   * The error message indicating the top-level error, if any.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly errorMessage?: string;
+  /**
+   * The start time of this indexer execution.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly startTime?: Date;
+  /**
+   * The end time of this indexer execution, if the execution has already completed.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly endTime?: Date;
+  /**
+   * The item-level indexing errors.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly errors: SearchIndexerError[];
+  /**
+   * The item-level indexing warnings.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly warnings: SearchIndexerWarning[];
+  /**
+   * The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly itemCount: number;
+  /**
+   * The number of items that failed to be indexed during this indexer execution.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly failedItemCount: number;
+  /**
+   * Change tracking state with which an indexer execution started.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly initialTrackingState?: string;
+  /**
+   * Change tracking state with which an indexer execution finished.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly finalTrackingState?: string;
+}
+/** Represents an item- or document-level indexing error. */
+export interface SearchIndexerError {
+  /**
+   * The key of the item for which indexing failed.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly key?: string;
+  /**
+   * The message describing the error that occurred while processing the item.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly errorMessage: string;
+  /**
+   * The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 when the service is too busy.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly statusCode: number;
+  /**
+   * The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly name?: string;
+  /**
+   * Additional, verbose details about the error to assist in debugging the indexer. This may not always be available.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly details?: string;
+  /**
+   * A link to a troubleshooting guide for these classes of errors. This may not always be available.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly documentationLink?: string;
+}
+/** Represents an item-level warning. */
+export interface SearchIndexerWarning {
+  /**
+   * The key of the item which generated a warning.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly key?: string;
+  /**
+   * The message describing the warning that occurred while processing the item.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly message: string;
+  /**
+   * The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly name?: string;
+  /**
+   * Additional, verbose details about the warning to assist in debugging the indexer. This may not always be available.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly details?: string;
+  /**
+   * A link to a troubleshooting guide for these classes of warnings. This may not always be available.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly documentationLink?: string;
+}
+export interface SearchIndexerLimits {
+  /**
+   * The maximum duration that the indexer is permitted to run for one execution.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly maxRunTime?: string;
+  /**
+   * The maximum size of a document, in bytes, which will be considered valid for indexing.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly maxDocumentExtractionSize?: number;
+  /**
+   * The maximum number of characters that will be extracted from a document picked up for indexing.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly maxDocumentContentCharactersToExtract?: number;
+}
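
All of the status models above are read-only and populated by the server. A sketch of consuming them through the package's SearchIndexerClient; the endpoint, API key, and indexer name are placeholders:

```ts
import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

// Sketch: read the server-populated status models declared above.
async function printIndexerStatus(): Promise<void> {
  const client = new SearchIndexerClient(
    "<endpoint>",
    new AzureKeyCredential("<api-key>"),
  );
  const status = await client.getIndexerStatus("hotels-indexer");
  console.log(`overall: ${status.status}`);
  const last = status.lastResult;
  if (last) {
    console.log(`${last.itemCount - last.failedItemCount}/${last.itemCount} items succeeded`);
    for (const err of last.errors) {
      // statusCode explains the failure class (400, 404, 409, 422, 503)
      console.log(err.statusCode, err.errorMessage, err.documentationLink ?? "");
    }
  }
}
```
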
+/** A list of skills. */
+export interface SearchIndexerSkillset {
+  /** The name of the skillset. */
+  name: string;
+  /** The description of the skillset. */
+  description?: string;
+  /** A list of skills in the skillset. */
+  skills: SearchIndexerSkillUnion[];
+  /** Details about the Azure AI service to be used when running skills. */
+  cognitiveServicesAccount?: CognitiveServicesAccountUnion;
+  /** Definition of additional projections to Azure blob, table, or files, of enriched data. */
+  knowledgeStore?: SearchIndexerKnowledgeStore;
+  /** Definition of additional projections to secondary search index(es). */
+  indexProjection?: SearchIndexerIndexProjection;
+  /** The ETag of the skillset. */
+  etag?: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition. Once you have encrypted your skillset definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+}
+/** Base type for skills. */
+export interface SearchIndexerSkill {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Skills.Util.ConditionalSkill" | "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" | "#Microsoft.Skills.Vision.OcrSkill" | "#Microsoft.Skills.Vision.ImageAnalysisSkill" | "#Microsoft.Skills.Text.LanguageDetectionSkill" | "#Microsoft.Skills.Util.ShaperSkill" | "#Microsoft.Skills.Text.MergeSkill" | "#Microsoft.Skills.Text.EntityRecognitionSkill" | "#Microsoft.Skills.Text.SentimentSkill" | "#Microsoft.Skills.Text.V3.SentimentSkill" | "#Microsoft.Skills.Text.V3.EntityLinkingSkill" | "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" | "#Microsoft.Skills.Text.PIIDetectionSkill" | "#Microsoft.Skills.Text.SplitSkill" | "#Microsoft.Skills.Text.CustomEntityLookupSkill" | "#Microsoft.Skills.Text.TranslationSkill" | "#Microsoft.Skills.Util.DocumentExtractionSkill" | "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" | "#Microsoft.Skills.Custom.WebApiSkill" | "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
+  /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */
+  name?: string;
+  /** The description of the skill which describes the inputs, outputs, and usage of the skill. */
+  description?: string;
+  /** Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. */
+  context?: string;
+  /** Inputs of the skills could be a column in the source data set, or the output of an upstream skill. */
+  inputs: InputFieldMappingEntry[];
+  /** The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. */
+  outputs: OutputFieldMappingEntry[];
+}
+/** Input field mapping for a skill. */
+export interface InputFieldMappingEntry {
+  /** The name of the input. */
+  name: string;
+  /** The source of the input. */
+  source?: string;
+  /** The source context used for selecting recursive inputs. */
+  sourceContext?: string;
+  /** The recursive inputs used when creating a complex type. */
+  inputs?: InputFieldMappingEntry[];
+}
+/** Output field mapping for a skill. */
+export interface OutputFieldMappingEntry {
+  /** The name of the output defined by the skill. */
+  name: string;
+  /** The target name of the output. It is optional and defaults to the name. */
+  targetName?: string;
+}
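
A minimal skillset sketch showing how a skill's inputs read from the enrichment tree and how outputs are renamed via targetName. Only the base skill shape declared above is used; the concrete SplitSkill type adds options (such as split mode) that are not shown in this excerpt, and all names are placeholders:

```ts
// Hypothetical skillset with a single text-split skill.
const skillset: SearchIndexerSkillset = {
  name: "hotels-skillset",
  skills: [
    {
      odatatype: "#Microsoft.Skills.Text.SplitSkill",
      context: "/document", // operate at the document root
      inputs: [{ name: "text", source: "/document/content" }],
      // the skill's "textItems" output becomes "/document/pages" downstream
      outputs: [{ name: "textItems", targetName: "pages" }],
    },
  ],
};
```
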
+/** Base type for describing any Azure AI service resource attached to a skillset. */
+export interface CognitiveServicesAccount {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey";
+  /** Description of the Azure AI service resource attached to a skillset. */
+  description?: string;
+}
+/** Definition of additional projections to Azure blob, table, or files, of enriched data. */
+export interface SearchIndexerKnowledgeStore {
+  /** The connection string to the storage account projections will be stored in. */
+  storageConnectionString: string;
+  /** A list of additional projections to perform during indexing. */
+  projections: SearchIndexerKnowledgeStoreProjection[];
+}
+/** Container object for various projection selectors. */
+export interface SearchIndexerKnowledgeStoreProjection {
+  /** Projections to Azure Table storage. */
+  tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[];
+  /** Projections to Azure Blob storage. */
+  objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[];
+  /** Projections to Azure File storage. */
+  files?: SearchIndexerKnowledgeStoreFileProjectionSelector[];
+}
+/** Abstract class to share properties between concrete selectors. */
+export interface SearchIndexerKnowledgeStoreProjectionSelector {
+  /** Name of reference key to different projection. */
+  referenceKeyName?: string;
+  /** Name of generated key to store projection under. */
+  generatedKeyName?: string;
+  /** Source data to project. */
+  source?: string;
+  /** Source context for complex projections. */
+  sourceContext?: string;
+  /** Nested inputs for complex projections. */
+  inputs?: InputFieldMappingEntry[];
+}
+/** Definition of additional projections to secondary search indexes. */
+export interface SearchIndexerIndexProjection {
+  /** A list of projections to be performed to secondary search indexes. */
+  selectors: SearchIndexerIndexProjectionSelector[];
+  /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+  parameters?: SearchIndexerIndexProjectionParameters;
+}
+/** Description for what data to store in the designated search index. */
+export interface SearchIndexerIndexProjectionSelector {
+  /** Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. */
+  targetIndexName: string;
+  /** Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. */
+  parentKeyFieldName: string;
+  /** Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. */
+  sourceContext: string;
+  /** Mappings for the projection, or which source should be mapped to which field in the target index. */
+  mappings: InputFieldMappingEntry[];
+}
+/** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
+export interface SearchIndexerIndexProjectionParameters {
+  /** Describes unknown properties. The value of an unknown property can be of "any" type. */
+  [property: string]: any;
+  /** Defines behavior of the index projections in relation to the rest of the indexer. */
+  projectionMode?: IndexProjectionMode;
+}
+/** Response from a list skillset request. If successful, it includes the full definitions of all skillsets. */
+export interface ListSkillsetsResult {
+  /**
+   * The skillsets defined in the Search service.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly skillsets: SearchIndexerSkillset[];
+}
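
A sketch of an index projection that splits each parent document into per-page child documents in a secondary index. The index and field names are placeholders, and the projectionMode string is an assumed member of IndexProjectionMode:

```ts
// Hypothetical projection: one child document per page of the parent.
const indexProjection: SearchIndexerIndexProjection = {
  selectors: [
    {
      targetIndexName: "hotels-pages",
      parentKeyFieldName: "parentId", // filterable string field, not the key
      sourceContext: "/document/pages/*", // cardinality: one child per page
      mappings: [{ name: "chunk", source: "/document/pages/*" }],
    },
  ],
  parameters: {
    projectionMode: "skipIndexingParentDocuments" as IndexProjectionMode, // assumed value
  },
};
```
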
+/** Represents a synonym map definition. */
+export interface SynonymMap {
+  /** The name of the synonym map. */
+  name: string;
+  /** The format of the synonym map. Only the 'solr' format is currently supported. */
+  format: "solr";
+  /** A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. */
+  synonyms: string;
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+  /** The ETag of the synonym map. */
+  etag?: string;
+}
+/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */
+export interface ListSynonymMapsResult {
+  /**
+   * The synonym maps in the Search service.
+   * NOTE: This property will not be serialized. It can only be populated by the server.
+   */
+  readonly synonymMaps: SynonymMap[];
+}
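
A minimal synonym map sketch. The rules use the 'solr' format (equivalent terms separated by commas, explicit rewrites with '=>') and are joined with newlines, as the synonyms doc comment requires:

```ts
// Hypothetical synonym map; the name and rules are placeholders.
const synonymMap: SynonymMap = {
  name: "hotel-synonyms",
  format: "solr",
  synonyms: [
    "USA, United States, United States of America", // equivalence rule
    "lodging => hotel, motel", // explicit one-way rewrite
  ].join("\n"),
};
```
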
+/** Represents a search index definition, which describes the fields and search behavior of an index. */
+export interface SearchIndex {
+  /** The name of the index. */
+  name: string;
+  /** The description of the index. */
+  description?: string;
+  /** The fields of the index. */
+  fields: SearchField[];
+  /** The scoring profiles for the index. */
+  scoringProfiles?: ScoringProfile[];
+  /** The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. */
+  defaultScoringProfile?: string;
+  /** Options to control Cross-Origin Resource Sharing (CORS) for the index. */
+  corsOptions?: CorsOptions;
+  /** The suggesters for the index. */
+  suggesters?: Suggester[];
+  /** The analyzers for the index. */
+  analyzers?: LexicalAnalyzerUnion[];
+  /** The tokenizers for the index. */
+  tokenizers?: LexicalTokenizerUnion[];
+  /** The token filters for the index. */
+  tokenFilters?: TokenFilterUnion[];
+  /** The character filters for the index. */
+  charFilters?: CharFilterUnion[];
+  /** The normalizers for the index. */
+  normalizers?: BaseLexicalNormalizerUnion[];
+  /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
+  encryptionKey?: SearchResourceEncryptionKey;
+  /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */
+  similarity?: SimilarityUnion;
+  /** Defines parameters for a search index that influence semantic capabilities. */
+  semanticSearch?: SemanticSearch;
+  /** Contains configuration options related to vector search. */
+  vectorSearch?: VectorSearch;
+  /** The ETag of the index. */
+  etag?: string;
+}
+/** Represents a field in an index definition, which describes the name, data type, and search behavior of a field. */
+export interface SearchField {
+  /** The name of the field, which must be unique within the fields collection of the index or parent field. */
+  name: string;
+  /** The data type of the field. */
+  type: SearchFieldDataType;
+  /** A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. */
+  key?: boolean;
+  /** A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be true for key fields, and it must be null for complex fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is true for simple fields, false for vector fields, and null for complex fields. */
+  retrievable?: boolean;
+  /** An immutable value indicating whether the field will be persisted separately on disk to be returned in a search result. You can disable this option if you don't plan to return the field contents in a search response to save on storage overhead. This can only be set during index creation and only for vector fields. This property cannot be changed for existing fields or set as false for new fields. If this property is set as false, the property 'retrievable' must also be set to false. This property must be true or unset for key fields, for new fields, and for non-vector fields, and it must be null for complex fields. Disabling this property will reduce index storage requirements. The default is true for vector fields. */
+  stored?: boolean;
+  /** A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like "sunny day", internally it will be split into the individual tokens "sunny" and "day". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. */
+  searchable?: boolean;
+  /** A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. */
+  filterable?: boolean;
+  /** A value indicating whether to enable the field to be referenced in $orderby expressions. By default, the search engine sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex collections are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. */
+  sortable?: boolean;
+  /** A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. */
+  facetable?: boolean;
+  /** The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */
+  analyzer?: LexicalAnalyzerName;
+  /** The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. */
+  searchAnalyzer?: LexicalAnalyzerName;
+  /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */
+  indexAnalyzer?: LexicalAnalyzerName;
+  /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. */
+  normalizer?: LexicalNormalizerName;
+  /** The dimensionality of the vector field. */
+  vectorSearchDimensions?: number;
+  /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. */
+  vectorSearchProfileName?: string;
+  /** The encoding format to interpret the field contents. */
+  vectorEncodingFormat?: VectorEncodingFormat;
+  /** A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. */
+  synonymMaps?: string[];
+  /** A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. */
+  fields?: SearchField[];
+}
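
Pulling the index and field types together, a minimal sketch of an index with a key field, an analyzed text field, and a vector field. The analyzer name, dimension count, and profile name are placeholders; the profile is assumed to be defined under the index's vectorSearch section:

```ts
// Hypothetical index definition using the shapes declared above.
const index: SearchIndex = {
  name: "hotels",
  fields: [
    // exactly one Edm.String key field per index
    { name: "id", type: "Edm.String" as SearchFieldDataType, key: true },
    {
      name: "description",
      type: "Edm.String" as SearchFieldDataType,
      searchable: true,
      analyzer: "en.lucene" as LexicalAnalyzerName, // English Lucene analyzer
    },
    {
      name: "descriptionVector",
      type: "Collection(Edm.Single)" as SearchFieldDataType,
      searchable: true,
      vectorSearchDimensions: 1536, // placeholder dimensionality
      vectorSearchProfileName: "default-profile", // defined in vectorSearch
    },
  ],
};
```
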
+/** Defines parameters for a search index that influence scoring in search queries. */
+export interface ScoringProfile {
+  /** The name of the scoring profile. */
+  name: string;
+  /** Parameters that boost scoring based on text matches in certain index fields. */
+  textWeights?: TextWeights;
+  /** The collection of functions that influence the scoring of documents. */
+  functions?: ScoringFunctionUnion[];
+  /** A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. */
+  functionAggregation?: ScoringFunctionAggregation;
+}
+/** Defines weights on index fields for which matches should boost scoring in search queries. */
+export interface TextWeights {
+  /** The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. */
+  weights: {
+    [propertyName: string]: number;
+  };
+}
+/** Base type for functions that can modify document scores during ranking. */
+export interface ScoringFunction {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  type: "distance" | "freshness" | "magnitude" | "tag";
+  /** The name of the field used as input to the scoring function. */
+  fieldName: string;
+  /** A multiplier for the raw score. Must be a positive number not equal to 1.0. */
+  boost: number;
+  /** A value indicating how boosting will be interpolated across document scores; defaults to "Linear". */
+  interpolation?: ScoringFunctionInterpolation;
+}
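
A minimal scoring profile sketch using text weights only; per the comments above, scoring functions could additionally be listed under functions and combined via functionAggregation. Field names and the aggregation value are placeholders:

```ts
// Hypothetical profile: matches in "hotelName" contribute five times the
// weight of matches in "description".
const profile: ScoringProfile = {
  name: "boost-name",
  textWeights: { weights: { hotelName: 5, description: 1 } },
  functionAggregation: "sum" as ScoringFunctionAggregation, // assumed value
};
```
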
+/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */
+export interface CorsOptions {
+  /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */
+  allowedOrigins: string[];
+  /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. */
+  maxAgeInSeconds?: number;
+}
+/** Defines how the Suggest API should apply to a group of fields in the index. */
+export interface Suggester {
+  /** The name of the suggester. */
+  name: string;
+  /** A value indicating the capabilities of the suggester. */
+  searchMode: "analyzingInfixMatching";
+  /** The list of field names to which the suggester applies. Each field must be searchable. */
+  sourceFields: string[];
+}
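
A short sketch of these two index-level options; the origin and field names are placeholders:

```ts
// Hypothetical CORS and suggester entries as they would appear on a SearchIndex.
const corsOptions: CorsOptions = {
  allowedOrigins: ["https://contoso.example"], // '*' allows all origins (not recommended)
  maxAgeInSeconds: 300, // cache preflight responses for 5 minutes
};
const suggester: Suggester = {
  name: "sg",
  searchMode: "analyzingInfixMatching", // the only supported mode
  sourceFields: ["hotelName", "category"], // each must be a searchable field
};
```
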
+/** Base type for analyzers. */
+export interface LexicalAnalyzer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer" | "#Microsoft.Azure.Search.PatternAnalyzer" | "#Microsoft.Azure.Search.StandardAnalyzer" | "#Microsoft.Azure.Search.StopAnalyzer";
+  /** The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+/** Base type for tokenizers. */
+export interface LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.ClassicTokenizer" | "#Microsoft.Azure.Search.EdgeNGramTokenizer" | "#Microsoft.Azure.Search.KeywordTokenizer" | "#Microsoft.Azure.Search.KeywordTokenizerV2" | "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer" | "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer" | "#Microsoft.Azure.Search.NGramTokenizer" | "#Microsoft.Azure.Search.PathHierarchyTokenizerV2" | "#Microsoft.Azure.Search.PatternTokenizer" | "#Microsoft.Azure.Search.StandardTokenizer" | "#Microsoft.Azure.Search.StandardTokenizerV2" | "#Microsoft.Azure.Search.UaxUrlEmailTokenizer";
+  /** The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+/** Base type for token filters. */
+export interface TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter" | "#Microsoft.Azure.Search.CjkBigramTokenFilter" | "#Microsoft.Azure.Search.CommonGramTokenFilter" | "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter" | "#Microsoft.Azure.Search.EdgeNGramTokenFilter" | "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2" | "#Microsoft.Azure.Search.ElisionTokenFilter" | "#Microsoft.Azure.Search.KeepTokenFilter" | "#Microsoft.Azure.Search.KeywordMarkerTokenFilter" | "#Microsoft.Azure.Search.LengthTokenFilter" | "#Microsoft.Azure.Search.LimitTokenFilter" | "#Microsoft.Azure.Search.NGramTokenFilter" | "#Microsoft.Azure.Search.NGramTokenFilterV2" | "#Microsoft.Azure.Search.PatternCaptureTokenFilter" | "#Microsoft.Azure.Search.PatternReplaceTokenFilter" | "#Microsoft.Azure.Search.PhoneticTokenFilter" | "#Microsoft.Azure.Search.ShingleTokenFilter" | "#Microsoft.Azure.Search.SnowballTokenFilter" | "#Microsoft.Azure.Search.StemmerTokenFilter" | "#Microsoft.Azure.Search.StemmerOverrideTokenFilter" | "#Microsoft.Azure.Search.StopwordsTokenFilter" | "#Microsoft.Azure.Search.SynonymTokenFilter" | "#Microsoft.Azure.Search.TruncateTokenFilter" | "#Microsoft.Azure.Search.UniqueTokenFilter" | "#Microsoft.Azure.Search.WordDelimiterTokenFilter";
+  /** The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+/** Base type for character filters. */
+export interface CharFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.MappingCharFilter" | "#Microsoft.Azure.Search.PatternReplaceCharFilter";
+  /** The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */
+  name: string;
+}
+/** Base type for normalizers. */
+export interface BaseLexicalNormalizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
+  /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */
+  name: string;
+}
+/** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */
+export interface Similarity {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.ClassicSimilarity" | "#Microsoft.Azure.Search.BM25Similarity";
+}
/** Defines parameters for a search index that influence semantic capabilities. */
|
|
704
|
+
export interface SemanticSearch {
|
|
705
|
+
/** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */
|
|
706
|
+
defaultConfigurationName?: string;
|
|
707
|
+
/** The semantic configurations for the index. */
|
|
708
|
+
configurations?: SemanticConfiguration[];
|
|
709
|
+
}
|
|
710
|
+
/** Defines a specific configuration to be used in the context of semantic capabilities. */
|
|
711
|
+
export interface SemanticConfiguration {
|
|
712
|
+
/** The name of the semantic configuration. */
|
|
713
|
+
name: string;
|
|
714
|
+
/** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. */
|
|
715
|
+
prioritizedFields: SemanticPrioritizedFields;
|
|
716
|
+
/** Specifies the score type to be used for the sort order of the search results. */
|
|
717
|
+
rankingOrder?: RankingOrder;
|
|
718
|
+
}
|
|
719
|
+
/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
|
|
720
|
+
export interface SemanticPrioritizedFields {
|
|
721
|
+
/** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
|
|
722
|
+
titleField?: SemanticField;
|
|
723
|
+
/** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
|
|
724
|
+
contentFields?: SemanticField[];
|
|
725
|
+
/** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
|
|
726
|
+
keywordsFields?: SemanticField[];
|
|
727
|
+
}
|
|
728
|
+
/** A field that is used as part of the semantic configuration. */
|
|
729
|
+
export interface SemanticField {
|
|
730
|
+
name: string;
|
|
731
|
+
}
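// Example (illustrative, not part of the generated declarations): a minimal
// semantic configuration built from the types above. The index field names
// ("hotelName", "description", "tags") are hypothetical.
const semanticSearch: SemanticSearch = {
    defaultConfigurationName: "default-semantic-config",
    configurations: [{
        name: "default-semantic-config",
        prioritizedFields: {
            titleField: { name: "hotelName" },
            contentFields: [{ name: "description" }],
            keywordsFields: [{ name: "tags" }],
        },
    }],
};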
/** Contains configuration options related to vector search. */
export interface VectorSearch {
    /** Defines combinations of configurations to use with vector search. */
    profiles?: VectorSearchProfile[];
    /** Contains configuration options specific to the algorithm used during indexing or querying. */
    algorithms?: VectorSearchAlgorithmConfigurationUnion[];
    /** Contains configuration options on how to vectorize text vector queries. */
    vectorizers?: VectorSearchVectorizerUnion[];
    /** Contains configuration options specific to the compression method used during indexing or querying. */
    compressions?: VectorSearchCompressionUnion[];
}
/** Defines a combination of configurations to use with vector search. */
export interface VectorSearchProfile {
    /** The name to associate with this particular vector search profile. */
    name: string;
    /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */
    algorithmConfigurationName: string;
    /** The name of the vectorization being configured for use with vector search. */
    vectorizerName?: string;
    /** The name of the compression method configuration that specifies the compression method and optional parameters. */
    compressionName?: string;
}
/** Contains configuration options specific to the algorithm used during indexing or querying. */
export interface VectorSearchAlgorithmConfiguration {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    kind: "hnsw" | "exhaustiveKnn";
    /** The name to associate with this particular configuration. */
    name: string;
}
/** Specifies the vectorization method to be used during query time. */
export interface VectorSearchVectorizer {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    kind: "azureOpenAI" | "customWebApi";
    /** The name to associate with this particular vectorization method. */
    vectorizerName: string;
}
/** Contains configuration options specific to the compression method used during indexing or querying. */
export interface VectorSearchCompression {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    kind: "scalarQuantization" | "binaryQuantization";
    /** The name to associate with this particular configuration. */
    compressionName: string;
    /** Contains the options for rescoring. */
    rescoringOptions?: RescoringOptions;
    /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should only be used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */
    truncationDimension?: number;
    /** If set to true, once the ordered set of results calculated using compressed vectors is obtained, the results will be reranked by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */
    rerankWithOriginalVectors?: boolean;
    /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */
    defaultOversampling?: number;
}
/** Contains the options for rescoring. */
export interface RescoringOptions {
    /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */
    enableRescoring?: boolean;
    /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */
    defaultOversampling?: number;
    /** Controls the storage method for original vectors. This setting is immutable. */
    rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod;
}
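// Example (illustrative): a VectorSearch sketch wiring one profile to an HNSW
// algorithm configuration and a scalar quantization compression with rescoring
// enabled. The "parameters" property on the algorithm entry is assumed from the
// concrete hnsw member of VectorSearchAlgorithmConfigurationUnion (declared
// elsewhere); "cosine" is one of the known VectorSearchAlgorithmMetric values.
const vectorSearch: VectorSearch = {
    algorithms: [{
        kind: "hnsw",
        name: "hnsw-default",
        parameters: { m: 4, efConstruction: 400, efSearch: 500, metric: "cosine" },
    }],
    compressions: [{
        kind: "scalarQuantization",
        compressionName: "sq-default",
        rescoringOptions: { enableRescoring: true, defaultOversampling: 4 },
    }],
    profiles: [{
        name: "vector-profile",
        algorithmConfigurationName: "hnsw-default",
        compressionName: "sq-default",
    }],
};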
/** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */
export interface ListIndexesResult {
    /**
     * The indexes in the Search service.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly indexes: SearchIndex[];
}
/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */
export interface GetIndexStatisticsResult {
    /**
     * The number of documents in the index.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly documentCount: number;
    /**
     * The amount of storage in bytes consumed by the index.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly storageSize: number;
    /**
     * The amount of memory in bytes consumed by vectors in the index.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly vectorIndexSize: number;
}
/** Specifies some text and analysis components used to break that text into tokens. */
export interface AnalyzeRequest {
    /** The text to break into tokens. */
    text: string;
    /** The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownAnalyzerNames is an enum containing known values. */
    analyzer?: string;
    /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownTokenizerNames is an enum containing known values. */
    tokenizer?: string;
    /** The name of the normalizer to use to normalize the given text. */
    normalizer?: LexicalNormalizerName;
    /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */
    tokenFilters?: string[];
    /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */
    charFilters?: string[];
}
/** The result of testing an analyzer on text. */
export interface AnalyzeResult {
    /** The list of tokens returned by the analyzer specified in the request. */
    tokens: AnalyzedTokenInfo[];
}
/** Information about a token returned by an analyzer. */
export interface AnalyzedTokenInfo {
    /**
     * The token returned by the analyzer.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly token: string;
    /**
     * The index of the first character of the token in the input text.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly startOffset: number;
    /**
     * The index of the last character of the token in the input text.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly endOffset: number;
    /**
     * The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other.
     * NOTE: This property will not be serialized. It can only be populated by the server.
     */
    readonly position: number;
}
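// Example (illustrative): testing a tokenizer plus token filter combination.
// The request uses the tokenizer/tokenFilters pair, which is mutually exclusive
// with the analyzer property above. The analyzeText client call is assumed from
// SearchIndexClient and is not declared in this file.
const analyzeRequest: AnalyzeRequest = {
    text: "The Quick Brown Fox",
    tokenizer: "standard_v2",
    tokenFilters: ["lowercase"],
};
// const result: AnalyzeResult = await indexClient.analyzeText("my-index", analyzeRequest);
// result.tokens.forEach((t) => console.log(t.token, t.startOffset, t.endOffset, t.position));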
/** Response from a get service statistics request. If successful, it includes service level counters and limits. */
export interface ServiceStatistics {
    /** Service level resource counters. */
    counters: ServiceCounters;
    /** Service level general limits. */
    limits: ServiceLimits;
}
/** Represents service-level resource counters and quotas. */
export interface ServiceCounters {
    /** Total number of documents across all indexes in the service. */
    documentCounter: ResourceCounter;
    /** Total number of indexes. */
    indexCounter: ResourceCounter;
    /** Total number of indexers. */
    indexerCounter: ResourceCounter;
    /** Total number of data sources. */
    dataSourceCounter: ResourceCounter;
    /** Total size of used storage in bytes. */
    storageSizeCounter: ResourceCounter;
    /** Total number of synonym maps. */
    synonymMapCounter: ResourceCounter;
    /** Total number of skillsets. */
    skillsetCounter: ResourceCounter;
    /** Total memory consumption of all vector indexes within the service, in bytes. */
    vectorIndexSizeCounter: ResourceCounter;
}
/** Represents a resource's usage and quota. */
export interface ResourceCounter {
    /** The resource usage amount. */
    usage: number;
    /** The resource amount quota. */
    quota?: number;
}
/** Represents various service level limits. */
export interface ServiceLimits {
    /** The maximum allowed fields per index. */
    maxFieldsPerIndex?: number;
    /** The maximum depth to which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. */
    maxFieldNestingDepthPerIndex?: number;
    /** The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. */
    maxComplexCollectionFieldsPerIndex?: number;
    /** The maximum number of objects in complex collections allowed per document. */
    maxComplexObjectsInCollectionsPerDocument?: number;
    /** The maximum amount of storage in bytes allowed per index. */
    maxStoragePerIndexInBytes?: number;
}
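// Example (illustrative): deriving quota utilization from a ResourceCounter. A
// missing quota means the resource is not capped at the current tier.
function quotaUsedPercent(counter: ResourceCounter): number | undefined {
    return counter.quota === undefined ? undefined : (100 * counter.usage) / counter.quota;
}
// e.g. quotaUsedPercent(stats.counters.indexCounter) === 40 when 2 of 5 indexes are used.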
/** Contains the parameters specific to the HNSW algorithm. */
export interface HnswParameters {
    /** The number of bi-directional links created for every new element during construction. Increasing this parameter value may improve recall and reduce retrieval times for datasets with high intrinsic dimensionality at the expense of increased memory consumption and longer indexing time. */
    m?: number;
    /** The size of the dynamic list containing the nearest neighbors, which is used during index time. Increasing this parameter may improve index quality, at the expense of increased indexing time. At a certain point, increasing this parameter leads to diminishing returns. */
    efConstruction?: number;
    /** The size of the dynamic list containing the nearest neighbors, which is used during search time. Increasing this parameter may improve search results, at the expense of slower search. At a certain point, increasing this parameter leads to diminishing returns. */
    efSearch?: number;
    /** The similarity metric to use for vector comparisons. */
    metric?: VectorSearchAlgorithmMetric;
}
/** Contains the parameters specific to exhaustive KNN algorithm. */
export interface ExhaustiveKnnParameters {
    /** The similarity metric to use for vector comparisons. */
    metric?: VectorSearchAlgorithmMetric;
}
/** Contains the parameters specific to Scalar Quantization. */
export interface ScalarQuantizationParameters {
    /** The quantized data type of compressed vector values. */
    quantizedDataType?: VectorSearchCompressionTarget;
}
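// Example (illustrative): HNSW parameters biased toward recall. Larger m and
// efConstruction raise graph quality at the cost of memory and indexing time;
// larger efSearch trades query latency for recall. "cosine" is one of the known
// VectorSearchAlgorithmMetric values (declared elsewhere).
const recallBiasedHnsw: HnswParameters = {
    m: 8,
    efConstruction: 600,
    efSearch: 700,
    metric: "cosine",
};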
/** Specifies the parameters for connecting to the Azure OpenAI resource. */
export interface AzureOpenAIParameters {
    /** The resource URI of the Azure OpenAI resource. */
    resourceUrl?: string;
    /** ID of the Azure OpenAI model deployment on the designated resource. */
    deploymentId?: string;
    /** API key of the designated Azure OpenAI resource. */
    apiKey?: string;
    /** The user-assigned managed identity used for outbound connections. */
    authIdentity?: SearchIndexerDataIdentityUnion;
    /** The name of the embedding model that is deployed at the provided deploymentId path. */
    modelName?: AzureOpenAIModelName;
}
/** Abstract base type for data identities. */
export interface SearchIndexerDataIdentity {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.DataNoneIdentity" | "#Microsoft.Azure.Search.DataUserAssignedIdentity";
}
/** Specifies the properties for connecting to a user-defined vectorizer. */
export interface WebApiParameters {
    /** The URI of the Web API providing the vectorizer. */
    uri?: string;
    /** The headers required to make the HTTP request. */
    httpHeaders?: {
        [propertyName: string]: string;
    };
    /** The method for the HTTP request. */
    httpMethod?: string;
    /** The desired timeout for the request. Default is 30 seconds. */
    timeout?: string;
    /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
    authResourceId?: string;
    /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
    authIdentity?: SearchIndexerDataIdentityUnion;
}
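// Example (illustrative): connection parameters for an Azure OpenAI embedding
// deployment. The resource URL and deployment name are placeholders; omitting
// apiKey and authIdentity typically defers to the search service's managed
// identity (an assumption here, not stated in the declarations above).
const aoaiConnection: AzureOpenAIParameters = {
    resourceUrl: "https://my-openai-resource.openai.azure.com",
    deploymentId: "my-embedding-deployment",
    modelName: "text-embedding-3-large",
};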
/** Provides parameter values to a distance scoring function. */
export interface DistanceScoringParameters {
    /** The name of the parameter passed in search queries to specify the reference location. */
    referencePointParameter: string;
    /** The distance in kilometers from the reference location where the boosting range ends. */
    boostingDistance: number;
}
/** Provides parameter values to a freshness scoring function. */
export interface FreshnessScoringParameters {
    /** The expiration period after which boosting will stop for a particular document. */
    boostingDuration: string;
}
/** Provides parameter values to a magnitude scoring function. */
export interface MagnitudeScoringParameters {
    /** The field value at which boosting starts. */
    boostingRangeStart: number;
    /** The field value at which boosting ends. */
    boostingRangeEnd: number;
    /** A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. */
    shouldBoostBeyondRangeByConstant?: boolean;
}
/** Provides parameter values to a tag scoring function. */
export interface TagScoringParameters {
    /** The name of the parameter passed in search queries to specify the list of tags to compare against the target field. */
    tagsParameter: string;
}
/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
export interface SearchIndexerKnowledgeStoreParameters {
    /** Describes unknown properties. The value of an unknown property can be of "any" type. */
    [property: string]: any;
    /** Whether or not projections should synthesize a generated key name if one isn't already present. */
    synthesizeGeneratedKeyName?: boolean;
}
/** An object that contains information about the matches that were found, and related metadata. */
export interface CustomEntity {
    /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the "normalized" form of the text being found. */
    name: string;
    /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
    description?: string;
    /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
    type?: string;
    /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
    subtype?: string;
    /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */
    id?: string;
    /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case-insensitive matches of "Microsoft" could be: microsoft, microSoft, MICROSOFT. */
    caseSensitive?: boolean;
    /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to accent. */
    accentSensitive?: boolean;
    /** Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. */
    fuzzyEditDistance?: number;
    /** Changes the default case sensitivity value for this entity. It can be used to change the default value of all aliases caseSensitive values. */
    defaultCaseSensitive?: boolean;
    /** Changes the default accent sensitivity value for this entity. It can be used to change the default value of all aliases accentSensitive values. */
    defaultAccentSensitive?: boolean;
    /** Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. */
    defaultFuzzyEditDistance?: number;
    /** An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name. */
    aliases?: CustomEntityAlias[];
}
/** A complex object that can be used to specify alternative spellings or synonyms to the root entity name. */
export interface CustomEntityAlias {
    /** The text of the alias. */
    text: string;
    /** Determine if the alias is case sensitive. */
    caseSensitive?: boolean;
    /** Determine if the alias is accent sensitive. */
    accentSensitive?: boolean;
    /** Determine the fuzzy edit distance of the alias. */
    fuzzyEditDistance?: number;
}
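// Example (illustrative): a custom entity whose aliases accept an exact
// uppercase ticker and a common misspelling. Intended for the
// inlineEntitiesDefinition of the CustomEntityLookupSkill declared further below.
const msftEntity: CustomEntity = {
    name: "Microsoft",
    description: "Software vendor",
    caseSensitive: false,
    fuzzyEditDistance: 1,
    aliases: [
        { text: "MSFT", caseSensitive: true, fuzzyEditDistance: 0 },
        { text: "Microsft" },
    ],
};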
/** Controls the cardinality for chunking the content. */
export interface DocumentIntelligenceLayoutSkillChunkingProperties {
    /** The unit of the chunk. */
    unit?: DocumentIntelligenceLayoutSkillChunkingUnit;
    /** The maximum chunk length in characters. Default is 500. */
    maximumLength?: number;
    /** The length of overlap provided between two text chunks. Default is 0. */
    overlapLength?: number;
}
/** Defines a data change detection policy that captures changes based on the value of a high water mark column. */
export interface HighWaterMarkChangeDetectionPolicy extends DataChangeDetectionPolicy {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy";
    /** The name of the high water mark column. */
    highWaterMarkColumnName: string;
}
/** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
export interface SqlIntegratedChangeTrackingPolicy extends DataChangeDetectionPolicy {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy";
}
/** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. */
export interface SoftDeleteColumnDeletionDetectionPolicy extends DataDeletionDetectionPolicy {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
    /** The name of the column to use for soft-deletion detection. */
    softDeleteColumnName?: string;
    /** The marker value that identifies an item as deleted. */
    softDeleteMarkerValue?: string;
}
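// Example (illustrative): marking rows whose hypothetical "isDeleted" column
// equals "true" as deleted during indexer runs.
const deletionPolicy: SoftDeleteColumnDeletionDetectionPolicy = {
    odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
    softDeleteColumnName: "isDeleted",
    softDeleteMarkerValue: "true",
};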
/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */
export interface ConditionalSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Util.ConditionalSkill";
}
/** A skill that uses text analytics for key phrase extraction. */
export interface KeyPhraseExtractionSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: KeyPhraseExtractionSkillLanguage;
    /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */
    maxKeyPhraseCount?: number;
    /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
    modelVersion?: string;
}
/** A skill that extracts text from image files. */
export interface OcrSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Vision.OcrSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: OcrSkillLanguage;
    /** A value indicating whether to turn orientation detection on. Default is false. */
    shouldDetectOrientation?: boolean;
    /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is "space". */
    lineEnding?: OcrLineEnding;
}
/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
export interface ImageAnalysisSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: ImageAnalysisSkillLanguage;
    /** A list of visual features. */
    visualFeatures?: VisualFeature[];
    /** A string indicating which domain-specific details to return. */
    details?: ImageDetail[];
}
/** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. */
export interface LanguageDetectionSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill";
    /** A country code to use as a hint to the language detection model if it cannot disambiguate the language. */
    defaultCountryHint?: string;
    /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
    modelVersion?: string;
}
/** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */
export interface ShaperSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Util.ShaperSkill";
}
/** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */
export interface MergeSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.MergeSkill";
    /** The tag indicates the start of the merged text. By default, the tag is an empty space. */
    insertPreTag?: string;
    /** The tag indicates the end of the merged text. By default, the tag is an empty space. */
    insertPostTag?: string;
}
/**
 * This skill is deprecated. Use the V3.EntityRecognitionSkill instead.
 *
 * @deprecated
 */
export interface EntityRecognitionSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill";
    /** A list of entity categories that should be extracted. */
    categories?: EntityCategory[];
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: EntityRecognitionSkillLanguage;
    /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */
    includeTypelessEntities?: boolean;
    /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
    minimumPrecision?: number;
}
/**
 * This skill is deprecated. Use the V3.SentimentSkill instead.
 *
 * @deprecated
 */
export interface SentimentSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.SentimentSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: SentimentSkillLanguage;
}
/** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at a sentence and document-level. */
export interface SentimentSkillV3 extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: string;
    /** If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. */
    includeOpinionMining?: boolean;
    /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
    modelVersion?: string;
}
/** Using the Text Analytics API, extracts linked entities from text. */
export interface EntityLinkingSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.V3.EntityLinkingSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: string;
    /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
    minimumPrecision?: number;
    /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
    modelVersion?: string;
}
/** Using the Text Analytics API, extracts entities of different types from text. */
export interface EntityRecognitionSkillV3 extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill";
    /** A list of entity categories that should be extracted. */
    categories?: string[];
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: string;
    /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
    minimumPrecision?: number;
    /** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
    modelVersion?: string;
}
/** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */
export interface PIIDetectionSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: string;
    /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
    minimumPrecision?: number;
    /** A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. */
    maskingMode?: PIIDetectionSkillMaskingMode;
    /** The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. */
    maskingCharacter?: string;
    /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
    modelVersion?: string;
    /** A list of PII entity categories that should be extracted and masked. */
    categories?: string[];
    /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */
    domain?: string;
}
/** A skill to split a string into chunks of text. */
export interface SplitSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.SplitSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: SplitSkillLanguage;
    /** A value indicating which split mode to perform. */
    textSplitMode?: TextSplitMode;
    /** The desired maximum page length. Default is 10000. */
    maxPageLength?: number;
    /** Only applicable when textSplitMode is set to 'pages'. If specified, the (n+1)th chunk will start with this number of characters/tokens from the end of the nth chunk. */
    pageOverlapLength?: number;
    /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */
    maximumPagesToTake?: number;
}
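// Example (illustrative): a SplitSkill that cuts enriched text into overlapping
// 2000-character pages. The inputs/outputs properties come from the
// SearchIndexerSkill base type (declared elsewhere in this file); the
// "/document/content" path is hypothetical.
const splitSkill: SplitSkill = {
    odatatype: "#Microsoft.Skills.Text.SplitSkill",
    textSplitMode: "pages",
    maxPageLength: 2000,
    pageOverlapLength: 200,
    inputs: [{ name: "text", source: "/document/content" }],
    outputs: [{ name: "textItems", targetName: "pages" }],
};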
/** A skill that looks for text from a custom, user-defined list of words and phrases. */
export interface CustomEntityLookupSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill";
    /** A value indicating which language code to use. Default is `en`. */
    defaultLanguageCode?: CustomEntityLookupSkillLanguage;
    /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */
    entitiesDefinitionUri?: string;
    /** The inline CustomEntity definition. */
    inlineEntitiesDefinition?: CustomEntity[];
    /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */
    globalDefaultCaseSensitive?: boolean;
    /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */
    globalDefaultAccentSensitive?: boolean;
    /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */
    globalDefaultFuzzyEditDistance?: number;
}
/** A skill to translate text from one language to another. */
export interface TextTranslationSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.TranslationSkill";
    /** The language code to translate documents into for documents that don't specify the to language explicitly. */
    defaultToLanguageCode: TextTranslationSkillLanguage;
    /** The language code to translate documents from for documents that don't specify the from language explicitly. */
    defaultFromLanguageCode?: TextTranslationSkillLanguage;
    /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is `en`. */
    suggestedFrom?: TextTranslationSkillLanguage;
}
/** A skill that extracts content from a file within the enrichment pipeline. */
export interface DocumentExtractionSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Util.DocumentExtractionSkill";
    /** The parsingMode for the skill. Will be set to 'default' if not defined. */
    parsingMode?: string;
    /** The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. */
    dataToExtract?: string;
    /** A dictionary of configurations for the skill. */
    configuration?: {
        [propertyName: string]: any;
    };
}
/** A skill that extracts content and layout information, via Azure AI Services, from files within the enrichment pipeline. */
export interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill";
    /** Controls the cardinality of the output format. Default is 'markdown'. */
    outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat;
    /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */
    outputMode?: DocumentIntelligenceLayoutSkillOutputMode;
    /** The depth of headers in the markdown output. Default is h6. */
    markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;
    /** Controls the cardinality of the content extracted from the document by the skill. */
    extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[];
    /** Controls the cardinality for chunking the content. */
    chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties;
}
/** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */
export interface WebApiSkill extends SearchIndexerSkill {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Custom.WebApiSkill";
    /** The URL for the Web API. */
    uri: string;
    /** The headers required to make the HTTP request. */
    httpHeaders?: {
        [propertyName: string]: string;
    };
    /** The method for the HTTP request. */
    httpMethod?: string;
    /** The desired timeout for the request. Default is 30 seconds. */
    timeout?: string;
    /** The desired batch size, which indicates the number of documents. */
    batchSize?: number;
    /** If set, the number of parallel calls that can be made to the Web API. */
    degreeOfParallelism?: number;
    /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */
    authResourceId?: string;
    /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. */
    authIdentity?: SearchIndexerDataIdentityUnion;
}
/** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */
export interface AzureOpenAIEmbeddingSkill extends SearchIndexerSkill, AzureOpenAIParameters {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill";
    /** The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. */
    dimensions?: number;
}
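// Example (illustrative): an embedding skill that inherits its connection
// properties from AzureOpenAIParameters above. inputs/outputs come from the
// SearchIndexerSkill base type (declared elsewhere); paths and names are
// hypothetical.
const embeddingSkill: AzureOpenAIEmbeddingSkill = {
    odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill",
    resourceUrl: "https://my-openai-resource.openai.azure.com",
    deploymentId: "my-embedding-deployment",
    modelName: "text-embedding-3-large",
    dimensions: 1024,
    inputs: [{ name: "text", source: "/document/pages/*" }],
    outputs: [{ name: "embedding", targetName: "vector" }],
};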
/** An empty object that represents the default Azure AI service resource for a skillset. */
export interface DefaultCognitiveServicesAccount extends CognitiveServicesAccount {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
}
/** The multi-region account key of an Azure AI service resource that's attached to a skillset. */
export interface CognitiveServicesAccountKey extends CognitiveServicesAccount {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey";
    /** The key used to provision the Azure AI service resource attached to a skillset. */
    key: string;
}
/** Description for what data to store in Azure Tables. */
export interface SearchIndexerKnowledgeStoreTableProjectionSelector extends SearchIndexerKnowledgeStoreProjectionSelector {
    /** Name of the Azure table to store projected data in. */
    tableName: string;
}
/** Abstract class to share properties between concrete selectors. */
export interface SearchIndexerKnowledgeStoreBlobProjectionSelector extends SearchIndexerKnowledgeStoreProjectionSelector {
    /** Blob container to store projections in. */
    storageContainer: string;
}
/** Defines a function that boosts scores based on distance from a geographic location. */
export interface DistanceScoringFunction extends ScoringFunction {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    type: "distance";
    /** Parameter values for the distance scoring function. */
    parameters: DistanceScoringParameters;
}
/** Defines a function that boosts scores based on the value of a date-time field. */
export interface FreshnessScoringFunction extends ScoringFunction {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    type: "freshness";
    /** Parameter values for the freshness scoring function. */
    parameters: FreshnessScoringParameters;
}
/** Defines a function that boosts scores based on the magnitude of a numeric field. */
export interface MagnitudeScoringFunction extends ScoringFunction {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    type: "magnitude";
    /** Parameter values for the magnitude scoring function. */
    parameters: MagnitudeScoringParameters;
}
/** Defines a function that boosts scores of documents with string values matching a given list of tags. */
export interface TagScoringFunction extends ScoringFunction {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    type: "tag";
    /** Parameter values for the tag scoring function. */
    parameters: TagScoringParameters;
}
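// Example (illustrative): a freshness function that doubles the score of
// documents whose hypothetical "lastRenovationDate" value falls within the last
// 30 days (boostingDuration is an XSD dayTimeDuration string). fieldName, boost
// and interpolation are assumed from the ScoringFunction base type declared
// elsewhere in this file.
const freshnessBoost: FreshnessScoringFunction = {
    type: "freshness",
    fieldName: "lastRenovationDate",
    boost: 2,
    interpolation: "quadratic",
    parameters: { boostingDuration: "P30D" },
};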
/** Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. */
export interface CustomAnalyzer extends LexicalAnalyzer {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.CustomAnalyzer";
    /** The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. KnownTokenizerNames is an enum containing known values. */
    tokenizerName: string;
    /** A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
    tokenFilters?: string[];
    /** A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
    charFilters?: string[];
}
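// Example (illustrative): a custom analyzer that emits the whole field value as
// a single token and lowercases it, using known tokenizer/filter names. The
// name property comes from the LexicalAnalyzer base type (declared elsewhere).
const lowercaseKeywordAnalyzer: CustomAnalyzer = {
    odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
    name: "lowercase_keyword",
    tokenizerName: "keyword_v2",
    tokenFilters: ["lowercase"],
};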
/** Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. */
export interface PatternAnalyzer extends LexicalAnalyzer {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.PatternAnalyzer";
    /** A value indicating whether terms should be lower-cased. Default is true. */
    lowerCaseTerms?: boolean;
    /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */
    pattern?: string;
    /** Regular expression flags. */
    flags?: string;
    /** A list of stopwords. */
    stopwords?: string[];
}
/** Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. */
export interface LuceneStandardAnalyzer extends LexicalAnalyzer {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.StandardAnalyzer";
    /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
    maxTokenLength?: number;
    /** A list of stopwords. */
    stopwords?: string[];
}
/** Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */
export interface StopAnalyzer extends LexicalAnalyzer {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.StopAnalyzer";
    /** A list of stopwords. */
    stopwords?: string[];
}
/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */
export interface ClassicTokenizer extends LexicalTokenizer {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.ClassicTokenizer";
    /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
    maxTokenLength?: number;
}
/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
export interface EdgeNGramTokenizer extends LexicalTokenizer {
    /** Polymorphic discriminator, which specifies the different types this object can be */
    odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer";
    /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
    minGram?: number;
    /** The maximum n-gram length. Default is 2. Maximum is 300. */
    maxGram?: number;
    /** Character classes to keep in the tokens. */
    tokenChars?: TokenCharacterKind[];
}
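// Example (illustrative): an edge n-gram tokenizer suited to prefix matching
// and autocomplete, emitting 2- to 20-character prefixes built from letters and
// digits. The name property comes from the LexicalTokenizer base type (declared
// elsewhere); "letter" and "digit" are known TokenCharacterKind values.
const prefixTokenizer: EdgeNGramTokenizer = {
    odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer",
    name: "edge_ngram_prefix",
    minGram: 2,
    maxGram: 20,
    tokenChars: ["letter", "digit"],
};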
|
|
1416
|
+
+/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */
+export interface KeywordTokenizer extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.KeywordTokenizer";
+  /** The read buffer size in bytes. Default is 256. */
+  bufferSize?: number;
+}
+/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */
+export interface KeywordTokenizerV2 extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.KeywordTokenizerV2";
+  /** The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+}
+/** Divides text using language-specific rules. */
+export interface MicrosoftLanguageTokenizer extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer";
+  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
+  maxTokenLength?: number;
+  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
+  isSearchTokenizer?: boolean;
+  /** The language to use. The default is English. */
+  language?: MicrosoftTokenizerLanguage;
+}
+/** Divides text using language-specific rules and reduces words to their base forms. */
+export interface MicrosoftLanguageStemmingTokenizer extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer";
+  /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
+  maxTokenLength?: number;
+  /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */
+  isSearchTokenizer?: boolean;
+  /** The language to use. The default is English. */
+  language?: MicrosoftStemmingTokenizerLanguage;
+}
+/** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
+export interface NGramTokenizer extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.NGramTokenizer";
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** Character classes to keep in the tokens. */
+  tokenChars?: TokenCharacterKind[];
+}
+/** Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. */
+export interface PathHierarchyTokenizerV2 extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2";
+  /** The delimiter character to use. Default is "/". */
+  delimiter?: string;
+  /** A value that, if set, replaces the delimiter character. Default is "/". */
+  replacement?: string;
+  /** The maximum token length. Default and maximum is 300. */
+  maxTokenLength?: number;
+  /** A value indicating whether to generate tokens in reverse order. Default is false. */
+  reverseTokenOrder?: boolean;
+  /** The number of initial tokens to skip. Default is 0. */
+  numberOfTokensToSkip?: number;
+}
+/** Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. */
+export interface PatternTokenizer extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.PatternTokenizer";
+  /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */
+  pattern?: string;
+  /** Regular expression flags. */
+  flags?: string;
+  /** The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. */
+  group?: number;
+}
+/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */
+export interface LuceneStandardTokenizer extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.StandardTokenizer";
+  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. */
+  maxTokenLength?: number;
+}
+/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */
+export interface LuceneStandardTokenizerV2 extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.StandardTokenizerV2";
+  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+}
+/** Tokenizes URLs and emails as one token. This tokenizer is implemented using Apache Lucene. */
+export interface UaxUrlEmailTokenizer extends LexicalTokenizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer";
+  /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
+  maxTokenLength?: number;
+}
+/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. */
+export interface AsciiFoldingTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter";
+  /** A value indicating whether the original token will be kept. Default is false. */
+  preserveOriginal?: boolean;
+}
+/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */
+export interface CjkBigramTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter";
+  /** The scripts to ignore. */
+  ignoreScripts?: CjkBigramTokenFilterScripts[];
+  /** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */
+  outputUnigrams?: boolean;
+}
+/** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */
+export interface CommonGramTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter";
+  /** The set of common words. */
+  commonWords: string[];
+  /** A value indicating whether common words matching will be case insensitive. Default is false. */
+  ignoreCase?: boolean;
+  /** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */
+  useQueryMode?: boolean;
+}
+/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */
+export interface DictionaryDecompounderTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter";
+  /** The list of words to match against. */
+  wordList: string[];
+  /** The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300. */
+  minWordSize?: number;
+  /** The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. */
+  minSubwordSize?: number;
+  /** The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. */
+  maxSubwordSize?: number;
+  /** A value indicating whether to add only the longest matching subword to the output. Default is false. */
+  onlyLongestMatch?: boolean;
+}
+/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */
+export interface EdgeNGramTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilter";
+  /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. */
+  maxGram?: number;
+  /** Specifies which side of the input the n-gram should be generated from. Default is "front". */
+  side?: EdgeNGramTokenFilterSide;
+}
+/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */
+export interface EdgeNGramTokenFilterV2 extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2";
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+  /** Specifies which side of the input the n-gram should be generated from. Default is "front". */
+  side?: EdgeNGramTokenFilterSide;
+}
+/** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. */
+export interface ElisionTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter";
+  /** The set of articles to remove. */
+  articles?: string[];
+}
+/** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */
+export interface KeepTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.KeepTokenFilter";
+  /** The list of words to keep. */
+  keepWords: string[];
+  /** A value indicating whether to lower case all words first. Default is false. */
+  lowerCaseKeepWords?: boolean;
+}
+/** Marks terms as keywords. This token filter is implemented using Apache Lucene. */
+export interface KeywordMarkerTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter";
+  /** A list of words to mark as keywords. */
+  keywords: string[];
+  /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */
+  ignoreCase?: boolean;
+}
+/** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */
+export interface LengthTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.LengthTokenFilter";
+  /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */
+  minLength?: number;
+  /** The maximum length in characters. Default and maximum is 300. */
+  maxLength?: number;
+}
+/** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. */
+export interface LimitTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.LimitTokenFilter";
+  /** The maximum number of tokens to produce. Default is 1. */
+  maxTokenCount?: number;
+  /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */
+  consumeAllTokens?: boolean;
+}
+/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */
+export interface NGramTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.NGramTokenFilter";
+  /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. */
+  maxGram?: number;
+}
+/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */
+export interface NGramTokenFilterV2 extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.NGramTokenFilterV2";
+  /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
+  minGram?: number;
+  /** The maximum n-gram length. Default is 2. Maximum is 300. */
+  maxGram?: number;
+}
+/** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */
+export interface PatternCaptureTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter";
+  /** A list of patterns to match against each token. */
+  patterns: string[];
+  /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */
+  preserveOriginal?: boolean;
+}
+/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. */
+export interface PatternReplaceTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter";
+  /** A regular expression pattern. */
+  pattern: string;
+  /** The replacement text. */
+  replacement: string;
+}
+/** Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. */
+export interface PhoneticTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter";
+  /** The phonetic encoder to use. Default is "metaphone". */
+  encoder?: PhoneticEncoder;
+  /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */
+  replaceOriginalTokens?: boolean;
+}
+/** Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. */
+export interface ShingleTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter";
+  /** The maximum shingle size. Default and minimum value is 2. */
+  maxShingleSize?: number;
+  /** The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize. */
+  minShingleSize?: number;
+  /** A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. */
+  outputUnigrams?: boolean;
+  /** A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false. */
+  outputUnigramsIfNoShingles?: boolean;
+  /** The string to use when joining adjacent tokens to form a shingle. Default is a single space (" "). */
+  tokenSeparator?: string;
+  /** The string to insert for each position at which there is no token. Default is an underscore ("_"). */
+  filterToken?: string;
+}
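A sketch of how the shingle filter above might be declared; as with the tokenizer example earlier, `name` comes from the `TokenFilter` base type outside this hunk, and the name itself is hypothetical.

```ts
import type { ShingleTokenFilter } from "@azure/search-documents";

// Hypothetical filter emitting two-word shingles alongside the unigrams,
// e.g. "quick brown fox" -> "quick", "quick brown", "brown", "brown fox", "fox".
const bigramShingles: ShingleTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter",
  name: "bigram-shingles",
  minShingleSize: 2,
  maxShingleSize: 2,
  outputUnigrams: true,
};
```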
+/** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */
+export interface SnowballTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter";
+  /** The language to use. */
+  language: SnowballTokenFilterLanguage;
+}
+/** Language specific stemming filter. This token filter is implemented using Apache Lucene. */
+export interface StemmerTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter";
+  /** The language to use. */
+  language: StemmerTokenFilterLanguage;
+}
+/** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. */
+export interface StemmerOverrideTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter";
+  /** A list of stemming rules in the following format: "word => stem", for example: "ran => run". */
+  rules: string[];
+}
+/** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */
+export interface StopwordsTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter";
+  /** The list of stopwords. This property and the stopwords list property cannot both be set. */
+  stopwords?: string[];
+  /** A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. */
+  stopwordsList?: StopwordsList;
+  /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */
+  ignoreCase?: boolean;
+  /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */
+  removeTrailingStopWords?: boolean;
+}
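Because `stopwords` and `stopwordsList` are mutually exclusive, a definition picks exactly one of them. A minimal sketch (filter names are hypothetical; `name` comes from the `TokenFilter` base type):

```ts
import type { StopwordsTokenFilter } from "@azure/search-documents";

// Variant 1: a custom stopword list.
const domainStops: StopwordsTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter",
  name: "domain-stopwords",
  stopwords: ["inc", "llc", "ltd"],
  ignoreCase: true,
};

// Variant 2: a predefined list; never set both properties on the same filter.
const frenchStops: StopwordsTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter",
  name: "french-stopwords",
  stopwordsList: "french",
};
```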
+/** Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. */
+export interface SynonymTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter";
+  /** A list of synonyms in one of the following two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of the => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. */
+  synonyms: string[];
+  /** A value indicating whether to case-fold input for matching. Default is false. */
+  ignoreCase?: boolean;
+  /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, all words in the list of synonyms (if => notation is not used) will map to one another. The following list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */
+  expand?: boolean;
+}
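The two synonym formats described in the doc comment can be mixed within one `synonyms` array. A hedged sketch (the filter name is hypothetical; `name` comes from the base type):

```ts
import type { SynonymTokenFilter } from "@azure/search-documents";

const synonyms: SynonymTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter",
  name: "my-synonyms",
  synonyms: [
    // Explicit mapping: left-hand terms are rewritten to the right-hand term.
    "incredible, unbelievable, fabulous => amazing",
    // Equivalence list: with expand=true, each term maps to every other term.
    "usa, united states, united states of america",
  ],
  ignoreCase: true,
  expand: true,
};
```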
+/** Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. */
+export interface TruncateTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter";
+  /** The length at which terms will be truncated. Default and maximum is 300. */
+  length?: number;
+}
+/** Filters out tokens with the same text as the previous token. This token filter is implemented using Apache Lucene. */
+export interface UniqueTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter";
+  /** A value indicating whether to remove duplicates only at the same position. Default is false. */
+  onlyOnSamePosition?: boolean;
+}
+/** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */
+export interface WordDelimiterTokenFilter extends TokenFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter";
+  /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. */
+  generateWordParts?: boolean;
+  /** A value indicating whether to generate number subwords. Default is true. */
+  generateNumberParts?: boolean;
+  /** A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. */
+  catenateWords?: boolean;
+  /** A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. */
+  catenateNumbers?: boolean;
+  /** A value indicating whether all subword parts will be catenated. For example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. */
+  catenateAll?: boolean;
+  /** A value indicating whether to split words on caseChange. For example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. */
+  splitOnCaseChange?: boolean;
+  /** A value indicating whether original words will be preserved and added to the subword list. Default is false. */
+  preserveOriginal?: boolean;
+  /** A value indicating whether to split on numbers. For example, if this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. */
+  splitOnNumerics?: boolean;
+  /** A value indicating whether to remove trailing "'s" for each subword. Default is true. */
+  stemEnglishPossessive?: boolean;
+  /** A list of tokens to protect from being delimited. */
+  protectedWords?: string[];
+}
+/** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */
+export interface MappingCharFilter extends CharFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.MappingCharFilter";
+  /** A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will be replaced with character "b"). */
+  mappings: string[];
+}
+/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. */
+export interface PatternReplaceCharFilter extends CharFilter {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter";
+  /** A regular expression pattern. */
+  pattern: string;
+  /** The replacement text. */
+  replacement: string;
+}
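A sketch reproducing the worked example from the doc comment above as a typed literal (the filter name is hypothetical; `name` comes from the `CharFilter` base type, and the backslash is doubled because the pattern lives in a JavaScript string):

```ts
import type { PatternReplaceCharFilter } from "@azure/search-documents";

// Given input "aa bb aa bb", this pattern/replacement yields "aa#bb aa#bb".
const joinAaBb: PatternReplaceCharFilter = {
  odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter",
  name: "join-aa-bb",
  pattern: "(aa)\\s+(bb)",
  replacement: "$1#$2",
};
```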
+/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of one or more filters, which modify the token that is stored. */
+export interface CustomLexicalNormalizer extends BaseLexicalNormalizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
+  /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
+  tokenFilters?: TokenFilterName[];
+  /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
+  charFilters?: CharFilterName[];
+}
+/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */
+export interface ClassicSimilarity extends Similarity {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.ClassicSimilarity";
+}
+/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */
+export interface BM25Similarity extends Similarity {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.BM25Similarity";
+  /** This property controls the scaling function between the term frequency of each matching term and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. */
+  k1?: number;
+  /** This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. */
+  b?: number;
+}
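Since `BM25Similarity` carries only the two tuning knobs, a configuration is just a small literal. A sketch (the values are illustrative, not recommendations):

```ts
import type { BM25Similarity } from "@azure/search-documents";

// Stronger length normalization and earlier term-frequency saturation
// than the 0.75 / 1.2 defaults described above.
const similarity: BM25Similarity = {
  odatatype: "#Microsoft.Azure.Search.BM25Similarity",
  k1: 0.9,
  b: 0.9,
};
// An index definition would carry this on its `similarity` property.
```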
+/** Contains configuration options specific to the HNSW approximate nearest neighbors algorithm used during indexing and querying. The HNSW algorithm offers a tunable trade-off between search speed and accuracy. */
+export interface HnswAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  kind: "hnsw";
+  /** Contains the parameters specific to HNSW algorithm. */
+  parameters?: HnswParameters;
+}
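A hedged sketch of an HNSW configuration: `name` comes from the `VectorSearchAlgorithmConfiguration` base type, and the `HnswParameters` fields shown are defined elsewhere in this package's typings, not in this hunk; the configuration name is hypothetical.

```ts
import type { HnswAlgorithmConfiguration } from "@azure/search-documents";

const hnsw: HnswAlgorithmConfiguration = {
  kind: "hnsw",
  name: "hnsw-default", // referenced by name from a vector search profile
  parameters: {
    m: 4,                // bidirectional links created per node
    efConstruction: 400, // candidate-list size while building the graph
    efSearch: 500,       // candidate-list size while querying
    metric: "cosine",
  },
};
```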
+/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */
+export interface ExhaustiveKnnAlgorithmConfiguration extends VectorSearchAlgorithmConfiguration {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  kind: "exhaustiveKnn";
+  /** Contains the parameters specific to exhaustive KNN algorithm. */
+  parameters?: ExhaustiveKnnParameters;
+}
+/** Specifies the Azure OpenAI resource used to vectorize a query string. */
+export interface AzureOpenAIVectorizer extends VectorSearchVectorizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  kind: "azureOpenAI";
+  /** Contains the parameters specific to Azure OpenAI embedding vectorization. */
+  parameters?: AzureOpenAIParameters;
+}
+/** Specifies a user-defined vectorizer for generating the vector embedding of a query string. Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */
+export interface WebApiVectorizer extends VectorSearchVectorizer {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  kind: "customWebApi";
+  /** Specifies the properties of the user-defined vectorizer. */
+  parameters?: WebApiParameters;
+}
+/** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */
+export interface ScalarQuantizationCompression extends VectorSearchCompression {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  kind: "scalarQuantization";
+  /** Contains the parameters specific to Scalar Quantization. */
+  parameters?: ScalarQuantizationParameters;
+}
+/** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */
+export interface BinaryQuantizationCompression extends VectorSearchCompression {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  kind: "binaryQuantization";
+}
+/** Clears the identity property of a datasource. */
+export interface SearchIndexerDataNoneIdentity extends SearchIndexerDataIdentity {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.DataNoneIdentity";
+}
+/** Specifies the identity for a datasource to use. */
+export interface SearchIndexerDataUserAssignedIdentity extends SearchIndexerDataIdentity {
+  /** Polymorphic discriminator, which specifies the different types this object can be */
+  odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity";
+  /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */
+  resourceId: string;
+}
+/** Projection definition for what data to store in Azure Blob. */
+export interface SearchIndexerKnowledgeStoreObjectProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {
+}
+/** Projection definition for what data to store in Azure Files. */
+export interface SearchIndexerKnowledgeStoreFileProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {
+}
+/** Known values of {@link ApiVersion20250901} that the service accepts. */
+export declare enum KnownApiVersion20250901 {
+  /** Api Version '2025-09-01' */
+  TwoThousandTwentyFive0901 = "2025-09-01"
+}
+/**
+ * Defines values for ApiVersion20250901. \
+ * {@link KnownApiVersion20250901} can be used interchangeably with ApiVersion20250901,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **2025-09-01**: Api Version '2025-09-01'
+ */
+export type ApiVersion20250901 = string;
+/** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
+export declare enum KnownSearchIndexerDataSourceType {
+  /** Definition of an Azure SQL datasource whose credentials can either be a standard SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. */
+  AzureSql = "azuresql",
+  /** Definition of a CosmosDB datasource whose credentials can either be a formatted connection string containing details for AccountEndpoint, AccountKey, and Database for a key based connection or details for ResourceID and ApiKind for keyless connection. The container property refers to the cosmosdb collection to be indexed and the optional query property refers to a SQL query on the collection. */
+  CosmosDb = "cosmosdb",
+  /** Definition of an Azure Blob datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. */
+  AzureBlob = "azureblob",
+  /** Definition of an Azure Table datasource whose credentials can either be a table connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property can be used to filter rows. */
+  AzureTable = "azuretable",
+  /** Definition of an Azure SQL datasource whose credentials can either be a standard ADO.NET formatted SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. */
+  MySql = "mysql",
+  /** Definition of an Azure ADLS Gen 2 datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. */
+  AdlsGen2 = "adlsgen2",
+  /** Definition of a Microsoft Fabric OneLake datasource whose credentials can either be the Fabric workspace GUID or a workspace FQDN. The container property refers to the lakehouse GUID and the optional query property refers to folders or shortcuts in the lakehouse. */
+  OneLake = "onelake"
+}
+/**
+ * Defines values for SearchIndexerDataSourceType. \
+ * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **azuresql**: Definition of an Azure SQL datasource whose credentials can either be a standard SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. \
+ * **cosmosdb**: Definition of a CosmosDB datasource whose credentials can either be a formatted connection string containing details for AccountEndpoint, AccountKey, and Database for a key based connection or details for ResourceID and ApiKind for keyless connection. The container property refers to the cosmosdb collection to be indexed and the optional query property refers to a SQL query on the collection. \
+ * **azureblob**: Definition of an Azure Blob datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. \
+ * **azuretable**: Definition of an Azure Table datasource whose credentials can either be a table connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property can be used to filter rows. \
+ * **mysql**: Definition of an Azure SQL datasource whose credentials can either be a standard ADO.NET formatted SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. \
+ * **adlsgen2**: Definition of an Azure ADLS Gen 2 datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. \
+ * **onelake**: Definition of a Microsoft Fabric OneLake datasource whose credentials can either be the Fabric workspace GUID or a workspace FQDN. The container property refers to the lakehouse GUID and the optional query property refers to folders or shortcuts in the lakehouse.
+ */
+export type SearchIndexerDataSourceType = string;
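A hedged sketch of how one of these type values is typically used: the `SearchIndexerDataSourceConnection` shape referenced below is defined elsewhere in this package, and all names and the connection string are placeholders.

```ts
import type { SearchIndexerDataSourceConnection } from "@azure/search-documents";

// Hypothetical blob data source; <storage-connection-string> is a placeholder.
const dataSource: SearchIndexerDataSourceConnection = {
  name: "docs-blob-source",
  type: "azureblob", // or KnownSearchIndexerDataSourceType.AzureBlob
  connectionString: "<storage-connection-string>",
  container: { name: "docs", query: "guides/" }, // query = optional sub-folder
};
```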
+/** Known values of {@link BlobIndexerParsingMode} that the service accepts. */
+export declare enum KnownBlobIndexerParsingMode {
+  /** Set to default for normal file processing. */
+  Default = "default",
+  /** Set to text to improve indexing performance on plain text files in blob storage. */
+  Text = "text",
+  /** Set to delimitedText when blobs are plain CSV files. */
+  DelimitedText = "delimitedText",
+  /** Set to json to extract structured content from JSON files. */
+  Json = "json",
+  /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */
+  JsonArray = "jsonArray",
+  /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */
+  JsonLines = "jsonLines"
+}
+/**
+ * Defines values for BlobIndexerParsingMode. \
+ * {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **default**: Set to default for normal file processing. \
+ * **text**: Set to text to improve indexing performance on plain text files in blob storage. \
+ * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \
+ * **json**: Set to json to extract structured content from JSON files. \
+ * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents. \
+ * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents.
+ */
+export type BlobIndexerParsingMode = string;
+/** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */
+export declare enum KnownBlobIndexerDataToExtract {
+  /** Indexes just the standard blob properties and user-specified metadata. */
+  StorageMetadata = "storageMetadata",
+  /** Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). */
+  AllMetadata = "allMetadata",
+  /** Extracts all metadata and textual content from each blob. */
+  ContentAndMetadata = "contentAndMetadata"
+}
+/**
+ * Defines values for BlobIndexerDataToExtract. \
+ * {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \
+ * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \
+ * **contentAndMetadata**: Extracts all metadata and textual content from each blob.
+ */
+export type BlobIndexerDataToExtract = string;
+/** Known values of {@link BlobIndexerImageAction} that the service accepts. */
+export declare enum KnownBlobIndexerImageAction {
+  /** Ignores embedded images or image files in the data set. This is the default. */
+  None = "none",
+  /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. */
+  GenerateNormalizedImages = "generateNormalizedImages",
+  /** Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set. */
+  GenerateNormalizedImagePerPage = "generateNormalizedImagePerPage"
+}
+/**
+ * Defines values for BlobIndexerImageAction. \
+ * {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Ignores embedded images or image files in the data set. This is the default. \
+ * **generateNormalizedImages**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \
+ * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set.
+ */
+export type BlobIndexerImageAction = string;
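The three blob-indexer enums above typically travel together inside an indexer's parameters. A hedged sketch, assuming the `IndexingParametersConfiguration` shape defined elsewhere in this package:

```ts
import type { IndexingParametersConfiguration } from "@azure/search-documents";

// Hypothetical blob-indexer configuration combining the enums above.
const configuration: IndexingParametersConfiguration = {
  parsingMode: "default",
  dataToExtract: "contentAndMetadata", // required by any image action
  imageAction: "generateNormalizedImages",
};
// An indexer definition would carry this as `indexer.parameters = { configuration }`.
```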
+/** Known values of {@link BlobIndexerPDFTextRotationAlgorithm} that the service accepts. */
+export declare enum KnownBlobIndexerPDFTextRotationAlgorithm {
+  /** Leverages normal text extraction. This is the default. */
+  None = "none",
+  /** May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. */
+  DetectAngles = "detectAngles"
+}
+/**
+ * Defines values for BlobIndexerPDFTextRotationAlgorithm. \
+ * {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **none**: Leverages normal text extraction. This is the default. \
+ * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply.
+ */
+export type BlobIndexerPDFTextRotationAlgorithm = string;
+/** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */
+export declare enum KnownIndexerExecutionEnvironment {
+  /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
+  Standard = "standard",
+  /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
+  Private = "private"
+}
+/**
+ * Defines values for IndexerExecutionEnvironment. \
+ * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **standard**: Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \
+ * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.
+ */
+export type IndexerExecutionEnvironment = string;
+/** Known values of {@link IndexProjectionMode} that the service accepts. */
+export declare enum KnownIndexProjectionMode {
+  /** The source document will be skipped from writing into the indexer's target index. */
+  SkipIndexingParentDocuments = "skipIndexingParentDocuments",
+  /** The source document will be written into the indexer's target index. This is the default pattern. */
+  IncludeIndexingParentDocuments = "includeIndexingParentDocuments"
+}
+/**
+ * Defines values for IndexProjectionMode. \
+ * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \
+ * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern.
+ */
+export type IndexProjectionMode = string;
+/** Known values of {@link SearchFieldDataType} that the service accepts. */
+export declare enum KnownSearchFieldDataType {
+  /** Indicates that a field contains a string. */
+  String = "Edm.String",
+  /** Indicates that a field contains a 32-bit signed integer. */
+  Int32 = "Edm.Int32",
+  /** Indicates that a field contains a 64-bit signed integer. */
+  Int64 = "Edm.Int64",
+  /** Indicates that a field contains an IEEE double-precision floating point number. */
+  Double = "Edm.Double",
+  /** Indicates that a field contains a Boolean value (true or false). */
+  Boolean = "Edm.Boolean",
+  /** Indicates that a field contains a date\/time value, including timezone information. */
+  DateTimeOffset = "Edm.DateTimeOffset",
+  /** Indicates that a field contains a geo-location in terms of longitude and latitude. */
+  GeographyPoint = "Edm.GeographyPoint",
+  /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */
+  Complex = "Edm.ComplexType",
+  /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). */
+  Single = "Edm.Single",
+  /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */
+  Half = "Edm.Half",
+  /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */
+  Int16 = "Edm.Int16",
+  /** Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */
+  SByte = "Edm.SByte",
+  /** Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). */
+  Byte = "Edm.Byte"
+}
+/**
+ * Defines values for SearchFieldDataType. \
+ * {@link KnownSearchFieldDataType} can be used interchangeably with SearchFieldDataType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **Edm.String**: Indicates that a field contains a string. \
+ * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer. \
+ * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer. \
+ * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number. \
+ * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false). \
+ * **Edm.DateTimeOffset**: Indicates that a field contains a date\/time value, including timezone information. \
+ * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and latitude. \
+ * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. \
+ * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). \
+ * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). \
+ * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). \
+ * **Edm.SByte**: Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). \
+ * **Edm.Byte**: Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte).
+ */
+export type SearchFieldDataType = string;
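A hedged sketch of the Known-enum pattern in field definitions: because `SearchFieldDataType` is a plain string type, either the enum member or the literal string is accepted. The field properties used below come from the `SearchField` type defined elsewhere in this package, and all field names are hypothetical.

```ts
import { KnownSearchFieldDataType } from "@azure/search-documents";

const fields = [
  { name: "id", type: KnownSearchFieldDataType.String, key: true },
  { name: "rating", type: KnownSearchFieldDataType.Double, filterable: true },
  // Narrow numeric types such as Edm.Single are collection-only; a real vector
  // field would additionally need dimensions and a vector search profile.
  { name: "embedding", type: "Collection(Edm.Single)", searchable: true },
];
```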
/** Known values of {@link LexicalAnalyzerName} that the service accepts. */
export declare enum KnownLexicalAnalyzerName {
    /** Microsoft analyzer for Arabic. */
    ArMicrosoft = "ar.microsoft",
    /** Lucene analyzer for Arabic. */
    ArLucene = "ar.lucene",
    /** Lucene analyzer for Armenian. */
    HyLucene = "hy.lucene",
    /** Microsoft analyzer for Bangla. */
    BnMicrosoft = "bn.microsoft",
    /** Lucene analyzer for Basque. */
    EuLucene = "eu.lucene",
    /** Microsoft analyzer for Bulgarian. */
    BgMicrosoft = "bg.microsoft",
    /** Lucene analyzer for Bulgarian. */
    BgLucene = "bg.lucene",
    /** Microsoft analyzer for Catalan. */
    CaMicrosoft = "ca.microsoft",
    /** Lucene analyzer for Catalan. */
    CaLucene = "ca.lucene",
    /** Microsoft analyzer for Chinese (Simplified). */
    ZhHansMicrosoft = "zh-Hans.microsoft",
    /** Lucene analyzer for Chinese (Simplified). */
    ZhHansLucene = "zh-Hans.lucene",
    /** Microsoft analyzer for Chinese (Traditional). */
    ZhHantMicrosoft = "zh-Hant.microsoft",
    /** Lucene analyzer for Chinese (Traditional). */
    ZhHantLucene = "zh-Hant.lucene",
    /** Microsoft analyzer for Croatian. */
    HrMicrosoft = "hr.microsoft",
    /** Microsoft analyzer for Czech. */
    CsMicrosoft = "cs.microsoft",
    /** Lucene analyzer for Czech. */
    CsLucene = "cs.lucene",
    /** Microsoft analyzer for Danish. */
    DaMicrosoft = "da.microsoft",
    /** Lucene analyzer for Danish. */
    DaLucene = "da.lucene",
    /** Microsoft analyzer for Dutch. */
    NlMicrosoft = "nl.microsoft",
    /** Lucene analyzer for Dutch. */
    NlLucene = "nl.lucene",
    /** Microsoft analyzer for English. */
    EnMicrosoft = "en.microsoft",
    /** Lucene analyzer for English. */
    EnLucene = "en.lucene",
    /** Microsoft analyzer for Estonian. */
    EtMicrosoft = "et.microsoft",
    /** Microsoft analyzer for Finnish. */
    FiMicrosoft = "fi.microsoft",
    /** Lucene analyzer for Finnish. */
    FiLucene = "fi.lucene",
    /** Microsoft analyzer for French. */
    FrMicrosoft = "fr.microsoft",
    /** Lucene analyzer for French. */
    FrLucene = "fr.lucene",
    /** Lucene analyzer for Galician. */
    GlLucene = "gl.lucene",
    /** Microsoft analyzer for German. */
    DeMicrosoft = "de.microsoft",
    /** Lucene analyzer for German. */
    DeLucene = "de.lucene",
    /** Microsoft analyzer for Greek. */
    ElMicrosoft = "el.microsoft",
    /** Lucene analyzer for Greek. */
    ElLucene = "el.lucene",
    /** Microsoft analyzer for Gujarati. */
    GuMicrosoft = "gu.microsoft",
    /** Microsoft analyzer for Hebrew. */
    HeMicrosoft = "he.microsoft",
    /** Microsoft analyzer for Hindi. */
    HiMicrosoft = "hi.microsoft",
    /** Lucene analyzer for Hindi. */
    HiLucene = "hi.lucene",
    /** Microsoft analyzer for Hungarian. */
    HuMicrosoft = "hu.microsoft",
    /** Lucene analyzer for Hungarian. */
    HuLucene = "hu.lucene",
    /** Microsoft analyzer for Icelandic. */
    IsMicrosoft = "is.microsoft",
    /** Microsoft analyzer for Indonesian (Bahasa). */
    IdMicrosoft = "id.microsoft",
    /** Lucene analyzer for Indonesian. */
    IdLucene = "id.lucene",
    /** Lucene analyzer for Irish. */
    GaLucene = "ga.lucene",
    /** Microsoft analyzer for Italian. */
    ItMicrosoft = "it.microsoft",
    /** Lucene analyzer for Italian. */
    ItLucene = "it.lucene",
    /** Microsoft analyzer for Japanese. */
    JaMicrosoft = "ja.microsoft",
    /** Lucene analyzer for Japanese. */
    JaLucene = "ja.lucene",
    /** Microsoft analyzer for Kannada. */
    KnMicrosoft = "kn.microsoft",
    /** Microsoft analyzer for Korean. */
    KoMicrosoft = "ko.microsoft",
    /** Lucene analyzer for Korean. */
    KoLucene = "ko.lucene",
    /** Microsoft analyzer for Latvian. */
    LvMicrosoft = "lv.microsoft",
    /** Lucene analyzer for Latvian. */
    LvLucene = "lv.lucene",
    /** Microsoft analyzer for Lithuanian. */
    LtMicrosoft = "lt.microsoft",
    /** Microsoft analyzer for Malayalam. */
    MlMicrosoft = "ml.microsoft",
    /** Microsoft analyzer for Malay (Latin). */
    MsMicrosoft = "ms.microsoft",
    /** Microsoft analyzer for Marathi. */
    MrMicrosoft = "mr.microsoft",
    /** Microsoft analyzer for Norwegian (Bokmål). */
    NbMicrosoft = "nb.microsoft",
    /** Lucene analyzer for Norwegian. */
    NoLucene = "no.lucene",
    /** Lucene analyzer for Persian. */
    FaLucene = "fa.lucene",
    /** Microsoft analyzer for Polish. */
    PlMicrosoft = "pl.microsoft",
    /** Lucene analyzer for Polish. */
    PlLucene = "pl.lucene",
    /** Microsoft analyzer for Portuguese (Brazil). */
    PtBrMicrosoft = "pt-BR.microsoft",
    /** Lucene analyzer for Portuguese (Brazil). */
    PtBrLucene = "pt-BR.lucene",
    /** Microsoft analyzer for Portuguese (Portugal). */
    PtPtMicrosoft = "pt-PT.microsoft",
    /** Lucene analyzer for Portuguese (Portugal). */
    PtPtLucene = "pt-PT.lucene",
    /** Microsoft analyzer for Punjabi. */
    PaMicrosoft = "pa.microsoft",
    /** Microsoft analyzer for Romanian. */
    RoMicrosoft = "ro.microsoft",
    /** Lucene analyzer for Romanian. */
    RoLucene = "ro.lucene",
    /** Microsoft analyzer for Russian. */
    RuMicrosoft = "ru.microsoft",
    /** Lucene analyzer for Russian. */
    RuLucene = "ru.lucene",
    /** Microsoft analyzer for Serbian (Cyrillic). */
    SrCyrillicMicrosoft = "sr-cyrillic.microsoft",
    /** Microsoft analyzer for Serbian (Latin). */
    SrLatinMicrosoft = "sr-latin.microsoft",
    /** Microsoft analyzer for Slovak. */
    SkMicrosoft = "sk.microsoft",
    /** Microsoft analyzer for Slovenian. */
    SlMicrosoft = "sl.microsoft",
    /** Microsoft analyzer for Spanish. */
    EsMicrosoft = "es.microsoft",
    /** Lucene analyzer for Spanish. */
    EsLucene = "es.lucene",
    /** Microsoft analyzer for Swedish. */
    SvMicrosoft = "sv.microsoft",
    /** Lucene analyzer for Swedish. */
    SvLucene = "sv.lucene",
    /** Microsoft analyzer for Tamil. */
    TaMicrosoft = "ta.microsoft",
    /** Microsoft analyzer for Telugu. */
    TeMicrosoft = "te.microsoft",
    /** Microsoft analyzer for Thai. */
    ThMicrosoft = "th.microsoft",
    /** Lucene analyzer for Thai. */
    ThLucene = "th.lucene",
    /** Microsoft analyzer for Turkish. */
    TrMicrosoft = "tr.microsoft",
    /** Lucene analyzer for Turkish. */
    TrLucene = "tr.lucene",
    /** Microsoft analyzer for Ukrainian. */
    UkMicrosoft = "uk.microsoft",
    /** Microsoft analyzer for Urdu. */
    UrMicrosoft = "ur.microsoft",
    /** Microsoft analyzer for Vietnamese. */
    ViMicrosoft = "vi.microsoft",
    /** Standard Lucene analyzer. */
    StandardLucene = "standard.lucene",
    /** Standard ASCII Folding Lucene analyzer. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers */
    StandardAsciiFoldingLucene = "standardasciifolding.lucene",
    /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html */
    Keyword = "keyword",
    /** Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html */
    Pattern = "pattern",
    /** Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html */
    Simple = "simple",
    /** Divides text at non-letters; applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html */
    Stop = "stop",
    /** An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html */
    Whitespace = "whitespace"
}
/**
 * Defines values for LexicalAnalyzerName. \
 * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **ar.microsoft**: Microsoft analyzer for Arabic. \
 * **ar.lucene**: Lucene analyzer for Arabic. \
 * **hy.lucene**: Lucene analyzer for Armenian. \
 * **bn.microsoft**: Microsoft analyzer for Bangla. \
 * **eu.lucene**: Lucene analyzer for Basque. \
 * **bg.microsoft**: Microsoft analyzer for Bulgarian. \
 * **bg.lucene**: Lucene analyzer for Bulgarian. \
 * **ca.microsoft**: Microsoft analyzer for Catalan. \
 * **ca.lucene**: Lucene analyzer for Catalan. \
 * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). \
 * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). \
 * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). \
 * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). \
 * **hr.microsoft**: Microsoft analyzer for Croatian. \
 * **cs.microsoft**: Microsoft analyzer for Czech. \
 * **cs.lucene**: Lucene analyzer for Czech. \
 * **da.microsoft**: Microsoft analyzer for Danish. \
 * **da.lucene**: Lucene analyzer for Danish. \
 * **nl.microsoft**: Microsoft analyzer for Dutch. \
 * **nl.lucene**: Lucene analyzer for Dutch. \
 * **en.microsoft**: Microsoft analyzer for English. \
 * **en.lucene**: Lucene analyzer for English. \
 * **et.microsoft**: Microsoft analyzer for Estonian. \
 * **fi.microsoft**: Microsoft analyzer for Finnish. \
 * **fi.lucene**: Lucene analyzer for Finnish. \
 * **fr.microsoft**: Microsoft analyzer for French. \
 * **fr.lucene**: Lucene analyzer for French. \
 * **gl.lucene**: Lucene analyzer for Galician. \
 * **de.microsoft**: Microsoft analyzer for German. \
 * **de.lucene**: Lucene analyzer for German. \
 * **el.microsoft**: Microsoft analyzer for Greek. \
 * **el.lucene**: Lucene analyzer for Greek. \
 * **gu.microsoft**: Microsoft analyzer for Gujarati. \
 * **he.microsoft**: Microsoft analyzer for Hebrew. \
 * **hi.microsoft**: Microsoft analyzer for Hindi. \
 * **hi.lucene**: Lucene analyzer for Hindi. \
 * **hu.microsoft**: Microsoft analyzer for Hungarian. \
 * **hu.lucene**: Lucene analyzer for Hungarian. \
 * **is.microsoft**: Microsoft analyzer for Icelandic. \
 * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). \
 * **id.lucene**: Lucene analyzer for Indonesian. \
 * **ga.lucene**: Lucene analyzer for Irish. \
 * **it.microsoft**: Microsoft analyzer for Italian. \
 * **it.lucene**: Lucene analyzer for Italian. \
 * **ja.microsoft**: Microsoft analyzer for Japanese. \
 * **ja.lucene**: Lucene analyzer for Japanese. \
 * **kn.microsoft**: Microsoft analyzer for Kannada. \
 * **ko.microsoft**: Microsoft analyzer for Korean. \
 * **ko.lucene**: Lucene analyzer for Korean. \
 * **lv.microsoft**: Microsoft analyzer for Latvian. \
 * **lv.lucene**: Lucene analyzer for Latvian. \
 * **lt.microsoft**: Microsoft analyzer for Lithuanian. \
 * **ml.microsoft**: Microsoft analyzer for Malayalam. \
 * **ms.microsoft**: Microsoft analyzer for Malay (Latin). \
 * **mr.microsoft**: Microsoft analyzer for Marathi. \
 * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). \
 * **no.lucene**: Lucene analyzer for Norwegian. \
 * **fa.lucene**: Lucene analyzer for Persian. \
 * **pl.microsoft**: Microsoft analyzer for Polish. \
 * **pl.lucene**: Lucene analyzer for Polish. \
 * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). \
 * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). \
 * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). \
 * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). \
 * **pa.microsoft**: Microsoft analyzer for Punjabi. \
 * **ro.microsoft**: Microsoft analyzer for Romanian. \
 * **ro.lucene**: Lucene analyzer for Romanian. \
 * **ru.microsoft**: Microsoft analyzer for Russian. \
 * **ru.lucene**: Lucene analyzer for Russian. \
 * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). \
 * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). \
 * **sk.microsoft**: Microsoft analyzer for Slovak. \
 * **sl.microsoft**: Microsoft analyzer for Slovenian. \
 * **es.microsoft**: Microsoft analyzer for Spanish. \
 * **es.lucene**: Lucene analyzer for Spanish. \
 * **sv.microsoft**: Microsoft analyzer for Swedish. \
 * **sv.lucene**: Lucene analyzer for Swedish. \
 * **ta.microsoft**: Microsoft analyzer for Tamil. \
 * **te.microsoft**: Microsoft analyzer for Telugu. \
 * **th.microsoft**: Microsoft analyzer for Thai. \
 * **th.lucene**: Lucene analyzer for Thai. \
 * **tr.microsoft**: Microsoft analyzer for Turkish. \
 * **tr.lucene**: Lucene analyzer for Turkish. \
 * **uk.microsoft**: Microsoft analyzer for Ukrainian. \
 * **ur.microsoft**: Microsoft analyzer for Urdu. \
 * **vi.microsoft**: Microsoft analyzer for Vietnamese. \
 * **standard.lucene**: Standard Lucene analyzer. \
 * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers \
 * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html \
 * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.html \
 * **simple**: Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html \
 * **stop**: Divides text at non-letters; applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html \
 * **whitespace**: An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html
 */
export type LexicalAnalyzerName = string;
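The analyzer is referenced by name on a field definition, so the enum member and the raw string are interchangeable here too. A minimal sketch, assuming a `SearchField`-like object with an `analyzerName` property (the convenience-layer name; treat the exact property name as an assumption):

```ts
import { KnownLexicalAnalyzerName } from "@azure/search-documents";

// Hypothetical searchable field using the Microsoft English analyzer.
const descriptionField = {
  name: "description",
  type: "Edm.String",
  searchable: true,
  analyzerName: KnownLexicalAnalyzerName.EnMicrosoft, // equivalent to "en.microsoft"
};
```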
/** Known values of {@link LexicalNormalizerName} that the service accepts. */
export declare enum KnownLexicalNormalizerName {
    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
    AsciiFolding = "asciifolding",
    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
    Elision = "elision",
    /** Normalizes token text to lowercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
    Lowercase = "lowercase",
    /** Standard normalizer, which consists of lowercase and asciifolding. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
    Standard = "standard",
    /** Normalizes token text to uppercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
    Uppercase = "uppercase"
}
/**
 * Defines values for LexicalNormalizerName. \
 * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html \
 * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html \
 * **lowercase**: Normalizes token text to lowercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html \
 * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html \
 * **uppercase**: Normalizes token text to uppercase. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html
 */
export type LexicalNormalizerName = string;
/** Known values of {@link VectorEncodingFormat} that the service accepts. */
export declare enum KnownVectorEncodingFormat {
    /** Encoding format representing bits packed into a wider data type. */
    PackedBit = "packedBit"
}
/**
 * Defines values for VectorEncodingFormat. \
 * {@link KnownVectorEncodingFormat} can be used interchangeably with VectorEncodingFormat,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **packedBit**: Encoding format representing bits packed into a wider data type.
 */
export type VectorEncodingFormat = string;
/** Known values of {@link RankingOrder} that the service accepts. */
export declare enum KnownRankingOrder {
    /** Sets sort order as BoostedRerankerScore */
    BoostedRerankerScore = "BoostedRerankerScore",
    /** Sets sort order as ReRankerScore */
    RerankerScore = "RerankerScore"
}
/**
 * Defines values for RankingOrder. \
 * {@link KnownRankingOrder} can be used interchangeably with RankingOrder,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **BoostedRerankerScore**: Sets sort order as BoostedRerankerScore \
 * **RerankerScore**: Sets sort order as ReRankerScore
 */
export type RankingOrder = string;
/** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */
export declare enum KnownVectorSearchAlgorithmKind {
    /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */
    Hnsw = "hnsw",
    /** Exhaustive KNN algorithm which will perform brute-force search. */
    ExhaustiveKnn = "exhaustiveKnn"
}
/**
 * Defines values for VectorSearchAlgorithmKind. \
 * {@link KnownVectorSearchAlgorithmKind} can be used interchangeably with VectorSearchAlgorithmKind,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **hnsw**: HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. \
 * **exhaustiveKnn**: Exhaustive KNN algorithm which will perform brute-force search.
 */
export type VectorSearchAlgorithmKind = string;
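The two kinds trade accuracy for speed: `hnsw` searches an approximate-nearest-neighbor graph, while `exhaustiveKnn` scans every vector. A sketch of how the kind discriminates an algorithm configuration (object shapes follow the service's REST contract; the parameter names are assumptions, not quoted from this package):

```ts
// Two algorithm configurations distinguished by `kind`.
const algorithms = [
  {
    name: "hnsw-config",
    kind: "hnsw", // approximate search: fast, recall tunable via parameters
    hnswParameters: { m: 4, efConstruction: 400, efSearch: 500, metric: "cosine" },
  },
  {
    name: "knn-config",
    kind: "exhaustiveKnn", // brute-force search: exact results, higher latency
    exhaustiveKnnParameters: { metric: "cosine" },
  },
];
```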
/** Known values of {@link VectorSearchVectorizerKind} that the service accepts. */
export declare enum KnownVectorSearchVectorizerKind {
    /** Generate embeddings using an Azure OpenAI resource at query time. */
    AzureOpenAI = "azureOpenAI",
    /** Generate embeddings using a custom web endpoint at query time. */
    CustomWebApi = "customWebApi"
}
/**
 * Defines values for VectorSearchVectorizerKind. \
 * {@link KnownVectorSearchVectorizerKind} can be used interchangeably with VectorSearchVectorizerKind,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \
 * **customWebApi**: Generate embeddings using a custom web endpoint at query time.
 */
export type VectorSearchVectorizerKind = string;
/** Known values of {@link VectorSearchCompressionKind} that the service accepts. */
export declare enum KnownVectorSearchCompressionKind {
    /** Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */
    ScalarQuantization = "scalarQuantization",
    /** Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */
    BinaryQuantization = "binaryQuantization"
}
/**
 * Defines values for VectorSearchCompressionKind. \
 * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. \
 * **binaryQuantization**: Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size.
 */
export type VectorSearchCompressionKind = string;
/** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */
export declare enum KnownVectorSearchCompressionRescoreStorageMethod {
    /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */
    PreserveOriginals = "preserveOriginals",
    /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */
    DiscardOriginals = "discardOriginals"
}
/**
 * Defines values for VectorSearchCompressionRescoreStorageMethod. \
 * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \
 * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality.
 */
export type VectorSearchCompressionRescoreStorageMethod = string;
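The compression kinds and the rescore storage method combine in one configuration: rescoring and oversampling are only possible when the originals are kept. A hedged sketch (field names are modeled on the service's REST contract and are assumptions, not quoted from this package):

```ts
// Scalar quantization that keeps full-precision vectors for rescoring.
const compression = {
  compressionName: "sq-with-rescore",
  kind: "scalarQuantization", // or "binaryQuantization" for maximum shrinkage
  rescoringOptions: {
    enableRescoring: true,
    rescoreStorageMethod: "preserveOriginals", // "discardOriginals" would rule out rescoring
  },
  scalarQuantizationParameters: { quantizedDataType: "int8" }, // the compression target defined below
};
```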
/** Known values of {@link TokenFilterName} that the service accepts. */
export declare enum KnownTokenFilterName {
    /** A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html */
    ArabicNormalization = "arabic_normalization",
    /** Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html */
    Apostrophe = "apostrophe",
    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html */
    AsciiFolding = "asciifolding",
    /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html */
    CjkBigram = "cjk_bigram",
    /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html */
    CjkWidth = "cjk_width",
    /** Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html */
    Classic = "classic",
    /** Constructs bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html */
    CommonGram = "common_grams",
    /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html */
    EdgeNGram = "edgeNGram_v2",
    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html */
    Elision = "elision",
    /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html */
    GermanNormalization = "german_normalization",
    /** Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html */
    HindiNormalization = "hindi_normalization",
    /** Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html */
    IndicNormalization = "indic_normalization",
    /** Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html */
    KeywordRepeat = "keyword_repeat",
    /** A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html */
    KStem = "kstem",
    /** Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html */
    Length = "length",
    /** Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html */
    Limit = "limit",
    /** Normalizes token text to lower case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html */
    Lowercase = "lowercase",
    /** Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html */
    NGram = "nGram_v2",
    /** Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html */
    PersianNormalization = "persian_normalization",
    /** Creates tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html */
    Phonetic = "phonetic",
    /** Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer */
    PorterStem = "porter_stem",
    /** Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html */
    Reverse = "reverse",
    /** Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html */
    ScandinavianNormalization = "scandinavian_normalization",
    /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html */
    ScandinavianFoldingNormalization = "scandinavian_folding",
    /** Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html */
    Shingle = "shingle",
    /** A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html */
    Snowball = "snowball",
    /** Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html */
    SoraniNormalization = "sorani_normalization",
    /** Language-specific stemming filter. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters */
    Stemmer = "stemmer",
    /** Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html */
    Stopwords = "stopwords",
    /** Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html */
    Trim = "trim",
    /** Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html */
    Truncate = "truncate",
    /** Filters out tokens with the same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html */
    Unique = "unique",
    /** Normalizes token text to upper case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html */
    Uppercase = "uppercase",
    /** Splits words into subwords and performs optional transformations on subword groups. */
    WordDelimiter = "word_delimiter"
}
/**
 * Defines values for TokenFilterName. \
 * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html \
 * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html \
 * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html \
 * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html \
 * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html \
 * **classic**: Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html \
 * **common_grams**: Constructs bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html \
 * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html \
 * **elision**: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html \
 * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html \
 * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html \
 * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html \
 * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html \
 * **kstem**: A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html \
 * **length**: Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html \
 * **limit**: Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html \
 * **lowercase**: Normalizes token text to lower case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.html \
 * **nGram_v2**: Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html \
 * **persian_normalization**: Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html \
 * **phonetic**: Creates tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html \
 * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer \
 * **reverse**: Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html \
 * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html \
 * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html \
 * **shingle**: Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html \
 * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html \
 * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html \
 * **stemmer**: Language-specific stemming filter. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters \
 * **stopwords**: Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html \
 * **trim**: Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html \
 * **truncate**: Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html \
 * **unique**: Filters out tokens with the same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html \
 * **uppercase**: Normalizes token text to upper case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html \
 * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups.
 */
export type TokenFilterName = string;
/** Known values of {@link CharFilterName} that the service accepts. */
export declare enum KnownCharFilterName {
    /** A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html */
    HtmlStrip = "html_strip"
}
/**
 * Defines values for CharFilterName. \
 * {@link KnownCharFilterName} can be used interchangeably with CharFilterName,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **html_strip**: A character filter that attempts to strip out HTML constructs. See https://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.html
 */
export type CharFilterName = string;
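Token filters and char filters are wired into a custom analyzer purely by name, which is why both types resolve to plain strings. A sketch of such an analyzer (the discriminator and property names are assumptions modeled on the service's REST shape):

```ts
// A custom analyzer: strip HTML from the raw input, tokenize,
// then lowercase, ASCII-fold, and drop stop words.
const myAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer", // discriminator; exact property name assumed
  name: "html-friendly-analyzer",
  tokenizerName: "standard_v2",
  tokenFilters: ["lowercase", "asciifolding", "stopwords"],
  charFilters: ["html_strip"],
};
```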
/** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */
export declare enum KnownVectorSearchAlgorithmMetric {
    /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */
    Cosine = "cosine",
    /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */
    Euclidean = "euclidean",
    /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */
    DotProduct = "dotProduct",
    /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */
    Hamming = "hamming"
}
/**
 * Defines values for VectorSearchAlgorithmMetric. \
 * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. \
 * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \
 * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \
 * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity.
 */
export type VectorSearchAlgorithmMetric = string;
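Metric choice is constrained by the field's data: `hamming` only makes sense for bit-packed (`packedBit`) binary vectors, while embeddings from text models are typically compared with `cosine`. A small self-contained illustration:

```ts
type Metric = "cosine" | "euclidean" | "dotProduct" | "hamming";

// Pick a sensible default metric given the field's vector encoding format.
function defaultMetric(encodingFormat?: string): Metric {
  // Hamming distance counts differing bit positions, so it requires packedBit data.
  return encodingFormat === "packedBit" ? "hamming" : "cosine";
}

console.log(defaultMetric("packedBit")); // "hamming"
console.log(defaultMetric());            // "cosine"
```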
/** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */
export declare enum KnownVectorSearchCompressionTarget {
    /** Int8 */
    Int8 = "int8"
}
/**
 * Defines values for VectorSearchCompressionTarget. \
 * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **int8**
 */
export type VectorSearchCompressionTarget = string;
/** Known values of {@link AzureOpenAIModelName} that the service accepts. */
export declare enum KnownAzureOpenAIModelName {
    /** TextEmbeddingAda002 */
    TextEmbeddingAda002 = "text-embedding-ada-002",
    /** TextEmbedding3Large */
    TextEmbedding3Large = "text-embedding-3-large",
    /** TextEmbedding3Small */
    TextEmbedding3Small = "text-embedding-3-small"
}
/**
 * Defines values for AzureOpenAIModelName. \
 * {@link KnownAzureOpenAIModelName} can be used interchangeably with AzureOpenAIModelName,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **text-embedding-ada-002** \
 * **text-embedding-3-large** \
 * **text-embedding-3-small**
 */
export type AzureOpenAIModelName = string;
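An `azureOpenAI` vectorizer pairs the vectorizer kind defined earlier with one of the model names above. A hedged sketch (endpoint and deployment are placeholders; property names follow the service's REST contract and are assumptions for this package):

```ts
// Query-time embedding via an Azure OpenAI deployment.
const vectorizer = {
  vectorizerName: "openai-vectorizer",
  kind: "azureOpenAI",
  parameters: {
    resourceUrl: "https://<your-resource>.openai.azure.com", // placeholder
    deploymentId: "<your-embedding-deployment>",             // placeholder
    modelName: "text-embedding-3-small", // must match the model actually deployed
  },
};
```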
/** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */
export declare enum KnownKeyPhraseExtractionSkillLanguage {
    /** Danish */
    Da = "da",
    /** Dutch */
    Nl = "nl",
    /** English */
    En = "en",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** German */
    De = "de",
    /** Italian */
    It = "it",
    /** Japanese */
    Ja = "ja",
    /** Korean */
    Ko = "ko",
    /** Norwegian (Bokmål) */
    No = "no",
    /** Polish */
    Pl = "pl",
    /** Portuguese (Portugal) */
    PtPT = "pt-PT",
    /** Portuguese (Brazil) */
    PtBR = "pt-BR",
    /** Russian */
    Ru = "ru",
    /** Spanish */
    Es = "es",
    /** Swedish */
    Sv = "sv"
}
/**
 * Defines values for KeyPhraseExtractionSkillLanguage. \
 * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **da**: Danish \
 * **nl**: Dutch \
 * **en**: English \
 * **fi**: Finnish \
 * **fr**: French \
 * **de**: German \
 * **it**: Italian \
 * **ja**: Japanese \
 * **ko**: Korean \
 * **no**: Norwegian (Bokmål) \
 * **pl**: Polish \
 * **pt-PT**: Portuguese (Portugal) \
 * **pt-BR**: Portuguese (Brazil) \
 * **ru**: Russian \
 * **es**: Spanish \
 * **sv**: Swedish
 */
export type KeyPhraseExtractionSkillLanguage = string;
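These language codes are consumed as a skill's `defaultLanguageCode`. A minimal skill sketch (the shape follows the service's skillset REST contract; treat the property names as assumptions):

```ts
// Key phrase extraction over the document's content, defaulting to English.
const keyPhraseSkill = {
  odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill", // discriminator; exact property name assumed
  defaultLanguageCode: "en",
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "keyPhrases", targetName: "keyPhrases" }],
};
```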
/** Known values of {@link OcrSkillLanguage} that the service accepts. */
export declare enum KnownOcrSkillLanguage {
    /** Afrikaans */
    Af = "af",
    /** Albanian */
    Sq = "sq",
    /** Angika (Devanagari) */
    Anp = "anp",
    /** Arabic */
    Ar = "ar",
    /** Asturian */
    Ast = "ast",
    /** Awadhi-Hindi (Devanagari) */
    Awa = "awa",
    /** Azerbaijani (Latin) */
    Az = "az",
    /** Bagheli */
    Bfy = "bfy",
    /** Basque */
    Eu = "eu",
    /** Belarusian (Cyrillic and Latin) */
    Be = "be",
    /** Belarusian (Cyrillic) */
    BeCyrl = "be-cyrl",
    /** Belarusian (Latin) */
    BeLatn = "be-latn",
    /** Bhojpuri-Hindi (Devanagari) */
    Bho = "bho",
    /** Bislama */
    Bi = "bi",
    /** Bodo (Devanagari) */
    Brx = "brx",
    /** Bosnian (Latin) */
    Bs = "bs",
    /** Brajbha */
    Bra = "bra",
    /** Breton */
    Br = "br",
    /** Bulgarian */
    Bg = "bg",
    /** Bundeli */
    Bns = "bns",
    /** Buryat (Cyrillic) */
    Bua = "bua",
    /** Catalan */
    Ca = "ca",
    /** Cebuano */
    Ceb = "ceb",
    /** Chamling */
    Rab = "rab",
    /** Chamorro */
    Ch = "ch",
    /** Chhattisgarhi (Devanagari) */
    Hne = "hne",
    /** Chinese (Simplified) */
    ZhHans = "zh-Hans",
    /** Chinese (Traditional) */
    ZhHant = "zh-Hant",
    /** Cornish */
    Kw = "kw",
    /** Corsican */
    Co = "co",
    /** Crimean Tatar (Latin) */
    Crh = "crh",
    /** Croatian */
    Hr = "hr",
    /** Czech */
    Cs = "cs",
    /** Danish */
    Da = "da",
    /** Dari */
    Prs = "prs",
    /** Dhimal (Devanagari) */
    Dhi = "dhi",
    /** Dogri (Devanagari) */
    Doi = "doi",
    /** Dutch */
    Nl = "nl",
    /** English */
    En = "en",
    /** Erzya (Cyrillic) */
    Myv = "myv",
    /** Estonian */
    Et = "et",
    /** Faroese */
    Fo = "fo",
    /** Fijian */
    Fj = "fj",
    /** Filipino */
    Fil = "fil",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** Friulian */
    Fur = "fur",
    /** Gagauz (Latin) */
    Gag = "gag",
    /** Galician */
    Gl = "gl",
    /** German */
    De = "de",
    /** Gilbertese */
    Gil = "gil",
    /** Gondi (Devanagari) */
    Gon = "gon",
    /** Greek */
    El = "el",
    /** Greenlandic */
    Kl = "kl",
    /** Gurung (Devanagari) */
    Gvr = "gvr",
    /** Haitian Creole */
    Ht = "ht",
    /** Halbi (Devanagari) */
    Hlb = "hlb",
    /** Hani */
    Hni = "hni",
    /** Haryanvi */
    Bgc = "bgc",
    /** Hawaiian */
    Haw = "haw",
    /** Hindi */
    Hi = "hi",
    /** Hmong Daw (Latin) */
    Mww = "mww",
    /** Ho (Devanagari) */
    Hoc = "hoc",
    /** Hungarian */
    Hu = "hu",
    /** Icelandic */
    Is = "is",
    /** Inari Sami */
    Smn = "smn",
    /** Indonesian */
    Id = "id",
    /** Interlingua */
    Ia = "ia",
    /** Inuktitut (Latin) */
    Iu = "iu",
    /** Irish */
    Ga = "ga",
    /** Italian */
    It = "it",
    /** Japanese */
    Ja = "ja",
    /** Jaunsari (Devanagari) */
    Jns = "Jns",
    /** Javanese */
    Jv = "jv",
    /** Kabuverdianu */
    Kea = "kea",
    /** Kachin (Latin) */
    Kac = "kac",
    /** Kangri (Devanagari) */
    Xnr = "xnr",
    /** Karachay-Balkar */
    Krc = "krc",
    /** Kara-Kalpak (Cyrillic) */
    KaaCyrl = "kaa-cyrl",
    /** Kara-Kalpak (Latin) */
    Kaa = "kaa",
    /** Kashubian */
    Csb = "csb",
    /** Kazakh (Cyrillic) */
    KkCyrl = "kk-cyrl",
    /** Kazakh (Latin) */
    KkLatn = "kk-latn",
    /** Khaling */
    Klr = "klr",
    /** Khasi */
    Kha = "kha",
    /** K'iche' */
    Quc = "quc",
    /** Korean */
    Ko = "ko",
    /** Korku */
    Kfq = "kfq",
    /** Koryak */
    Kpy = "kpy",
    /** Kosraean */
    Kos = "kos",
    /** Kumyk (Cyrillic) */
    Kum = "kum",
    /** Kurdish (Arabic) */
    KuArab = "ku-arab",
    /** Kurdish (Latin) */
    KuLatn = "ku-latn",
    /** Kurukh (Devanagari) */
    Kru = "kru",
    /** Kyrgyz (Cyrillic) */
    Ky = "ky",
    /** Lakota */
    Lkt = "lkt",
    /** Latin */
    La = "la",
    /** Lithuanian */
    Lt = "lt",
    /** Lower Sorbian */
    Dsb = "dsb",
    /** Lule Sami */
    Smj = "smj",
    /** Luxembourgish */
    Lb = "lb",
    /** Mahasu Pahari (Devanagari) */
    Bfz = "bfz",
    /** Malay (Latin) */
    Ms = "ms",
    /** Maltese */
    Mt = "mt",
    /** Malto (Devanagari) */
    Kmj = "kmj",
    /** Manx */
    Gv = "gv",
    /** Maori */
    Mi = "mi",
    /** Marathi */
    Mr = "mr",
    /** Mongolian (Cyrillic) */
    Mn = "mn",
    /** Montenegrin (Cyrillic) */
    CnrCyrl = "cnr-cyrl",
    /** Montenegrin (Latin) */
    CnrLatn = "cnr-latn",
    /** Neapolitan */
    Nap = "nap",
    /** Nepali */
    Ne = "ne",
    /** Niuean */
    Niu = "niu",
    /** Nogay */
    Nog = "nog",
    /** Northern Sami (Latin) */
    Sme = "sme",
    /** Norwegian */
    Nb = "nb",
    /** Norwegian */
    No = "no",
    /** Occitan */
    Oc = "oc",
    /** Ossetic */
    Os = "os",
    /** Pashto */
    Ps = "ps",
    /** Persian */
    Fa = "fa",
    /** Polish */
    Pl = "pl",
    /** Portuguese */
    Pt = "pt",
    /** Punjabi (Arabic) */
    Pa = "pa",
    /** Ripuarian */
    Ksh = "ksh",
    /** Romanian */
    Ro = "ro",
    /** Romansh */
    Rm = "rm",
    /** Russian */
    Ru = "ru",
    /** Sadri (Devanagari) */
    Sck = "sck",
    /** Samoan (Latin) */
    Sm = "sm",
    /** Sanskrit (Devanagari) */
    Sa = "sa",
    /** Santali (Devanagari) */
    Sat = "sat",
    /** Scots */
    Sco = "sco",
    /** Scottish Gaelic */
    Gd = "gd",
    /** Serbian (Latin) */
    Sr = "sr",
    /** Serbian (Cyrillic) */
    SrCyrl = "sr-Cyrl",
    /** Serbian (Latin) */
    SrLatn = "sr-Latn",
    /** Sherpa (Devanagari) */
    Xsr = "xsr",
    /** Sirmauri (Devanagari) */
    Srx = "srx",
    /** Skolt Sami */
    Sms = "sms",
    /** Slovak */
    Sk = "sk",
    /** Slovenian */
    Sl = "sl",
    /** Somali (Arabic) */
    So = "so",
    /** Southern Sami */
    Sma = "sma",
    /** Spanish */
    Es = "es",
    /** Swahili (Latin) */
    Sw = "sw",
    /** Swedish */
    Sv = "sv",
    /** Tajik (Cyrillic) */
    Tg = "tg",
    /** Tatar (Latin) */
    Tt = "tt",
    /** Tetum */
    Tet = "tet",
    /** Thangmi */
    Thf = "thf",
    /** Tongan */
    To = "to",
    /** Turkish */
    Tr = "tr",
    /** Turkmen (Latin) */
    Tk = "tk",
    /** Tuvan */
    Tyv = "tyv",
    /** Upper Sorbian */
    Hsb = "hsb",
    /** Urdu */
    Ur = "ur",
    /** Uyghur (Arabic) */
    Ug = "ug",
    /** Uzbek (Arabic) */
    UzArab = "uz-arab",
    /** Uzbek (Cyrillic) */
    UzCyrl = "uz-cyrl",
    /** Uzbek (Latin) */
|
+
Uz = "uz",
|
|
3037
|
+
/** Volapük */
|
|
3038
|
+
Vo = "vo",
|
|
3039
|
+
/** Walser */
|
|
3040
|
+
Wae = "wae",
|
|
3041
|
+
/** Welsh */
|
|
3042
|
+
Cy = "cy",
|
|
3043
|
+
/** Western Frisian */
|
|
3044
|
+
Fy = "fy",
|
|
3045
|
+
/** Yucatec Maya */
|
|
3046
|
+
Yua = "yua",
|
|
3047
|
+
/** Zhuang */
|
|
3048
|
+
Za = "za",
|
|
3049
|
+
/** Zulu */
|
|
3050
|
+
Zu = "zu",
|
|
3051
|
+
/** Unknown (All) */
|
|
3052
|
+
Unk = "unk"
|
|
3053
|
+
}
|
|
3054
|
+
/**
|
|
3055
|
+
* Defines values for OcrSkillLanguage. \
|
|
3056
|
+
* {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage,
|
|
3057
|
+
* this enum contains the known values that the service supports.
|
|
3058
|
+
* ### Known values supported by the service
|
|
3059
|
+
* **af**: Afrikaans \
|
|
3060
|
+
* **sq**: Albanian \
|
|
3061
|
+
* **anp**: Angika (Devanagiri) \
|
|
3062
|
+
* **ar**: Arabic \
|
|
3063
|
+
* **ast**: Asturian \
|
|
3064
|
+
* **awa**: Awadhi-Hindi (Devanagiri) \
|
|
3065
|
+
* **az**: Azerbaijani (Latin) \
|
|
3066
|
+
* **bfy**: Bagheli \
|
|
3067
|
+
* **eu**: Basque \
|
|
3068
|
+
* **be**: Belarusian (Cyrillic and Latin) \
|
|
3069
|
+
* **be-cyrl**: Belarusian (Cyrillic) \
|
|
3070
|
+
* **be-latn**: Belarusian (Latin) \
|
|
3071
|
+
* **bho**: Bhojpuri-Hindi (Devanagiri) \
|
|
3072
|
+
* **bi**: Bislama \
|
|
3073
|
+
* **brx**: Bodo (Devanagiri) \
|
|
3074
|
+
* **bs**: Bosnian Latin \
|
|
3075
|
+
* **bra**: Brajbha \
|
|
3076
|
+
* **br**: Breton \
|
|
3077
|
+
* **bg**: Bulgarian \
|
|
3078
|
+
* **bns**: Bundeli \
|
|
3079
|
+
* **bua**: Buryat (Cyrillic) \
|
|
3080
|
+
* **ca**: Catalan \
|
|
3081
|
+
* **ceb**: Cebuano \
|
|
3082
|
+
* **rab**: Chamling \
|
|
3083
|
+
* **ch**: Chamorro \
|
|
3084
|
+
* **hne**: Chhattisgarhi (Devanagiri) \
|
|
3085
|
+
* **zh-Hans**: Chinese Simplified \
|
|
3086
|
+
* **zh-Hant**: Chinese Traditional \
|
|
3087
|
+
* **kw**: Cornish \
|
|
3088
|
+
* **co**: Corsican \
|
|
3089
|
+
* **crh**: Crimean Tatar (Latin) \
|
|
3090
|
+
* **hr**: Croatian \
|
|
3091
|
+
* **cs**: Czech \
|
|
3092
|
+
* **da**: Danish \
|
|
3093
|
+
* **prs**: Dari \
|
|
3094
|
+
* **dhi**: Dhimal (Devanagiri) \
|
|
3095
|
+
* **doi**: Dogri (Devanagiri) \
|
|
3096
|
+
* **nl**: Dutch \
|
|
3097
|
+
* **en**: English \
|
|
3098
|
+
* **myv**: Erzya (Cyrillic) \
|
|
3099
|
+
* **et**: Estonian \
|
|
3100
|
+
* **fo**: Faroese \
|
|
3101
|
+
* **fj**: Fijian \
|
|
3102
|
+
* **fil**: Filipino \
|
|
3103
|
+
* **fi**: Finnish \
|
|
3104
|
+
* **fr**: French \
|
|
3105
|
+
* **fur**: Frulian \
|
|
3106
|
+
* **gag**: Gagauz (Latin) \
|
|
3107
|
+
* **gl**: Galician \
|
|
3108
|
+
* **de**: German \
|
|
3109
|
+
* **gil**: Gilbertese \
|
|
3110
|
+
* **gon**: Gondi (Devanagiri) \
|
|
3111
|
+
* **el**: Greek \
|
|
3112
|
+
* **kl**: Greenlandic \
|
|
3113
|
+
* **gvr**: Gurung (Devanagiri) \
|
|
3114
|
+
* **ht**: Haitian Creole \
|
|
3115
|
+
* **hlb**: Halbi (Devanagiri) \
|
|
3116
|
+
* **hni**: Hani \
|
|
3117
|
+
* **bgc**: Haryanvi \
|
|
3118
|
+
* **haw**: Hawaiian \
|
|
3119
|
+
* **hi**: Hindi \
|
|
3120
|
+
* **mww**: Hmong Daw (Latin) \
|
|
3121
|
+
* **hoc**: Ho (Devanagiri) \
|
|
3122
|
+
* **hu**: Hungarian \
|
|
3123
|
+
* **is**: Icelandic \
|
|
3124
|
+
* **smn**: Inari Sami \
|
|
3125
|
+
* **id**: Indonesian \
|
|
3126
|
+
* **ia**: Interlingua \
|
|
3127
|
+
* **iu**: Inuktitut (Latin) \
|
|
3128
|
+
* **ga**: Irish \
|
|
3129
|
+
* **it**: Italian \
|
|
3130
|
+
* **ja**: Japanese \
|
|
3131
|
+
* **Jns**: Jaunsari (Devanagiri) \
|
|
3132
|
+
* **jv**: Javanese \
|
|
3133
|
+
* **kea**: Kabuverdianu \
|
|
3134
|
+
* **kac**: Kachin (Latin) \
|
|
3135
|
+
* **xnr**: Kangri (Devanagiri) \
|
|
3136
|
+
* **krc**: Karachay-Balkar \
|
|
3137
|
+
* **kaa-cyrl**: Kara-Kalpak (Cyrillic) \
|
|
3138
|
+
* **kaa**: Kara-Kalpak (Latin) \
|
|
3139
|
+
* **csb**: Kashubian \
|
|
3140
|
+
* **kk-cyrl**: Kazakh (Cyrillic) \
|
|
3141
|
+
* **kk-latn**: Kazakh (Latin) \
|
|
3142
|
+
* **klr**: Khaling \
|
|
3143
|
+
* **kha**: Khasi \
|
|
3144
|
+
* **quc**: K'iche' \
|
|
3145
|
+
* **ko**: Korean \
|
|
3146
|
+
* **kfq**: Korku \
|
|
3147
|
+
* **kpy**: Koryak \
|
|
3148
|
+
* **kos**: Kosraean \
|
|
3149
|
+
* **kum**: Kumyk (Cyrillic) \
|
|
3150
|
+
* **ku-arab**: Kurdish (Arabic) \
|
|
3151
|
+
* **ku-latn**: Kurdish (Latin) \
|
|
3152
|
+
* **kru**: Kurukh (Devanagiri) \
|
|
3153
|
+
* **ky**: Kyrgyz (Cyrillic) \
|
|
3154
|
+
* **lkt**: Lakota \
|
|
3155
|
+
* **la**: Latin \
|
|
3156
|
+
* **lt**: Lithuanian \
|
|
3157
|
+
* **dsb**: Lower Sorbian \
|
|
3158
|
+
* **smj**: Lule Sami \
|
|
3159
|
+
* **lb**: Luxembourgish \
|
|
3160
|
+
* **bfz**: Mahasu Pahari (Devanagiri) \
|
|
3161
|
+
* **ms**: Malay (Latin) \
|
|
3162
|
+
* **mt**: Maltese \
|
|
3163
|
+
* **kmj**: Malto (Devanagiri) \
|
|
3164
|
+
* **gv**: Manx \
|
|
3165
|
+
* **mi**: Maori \
|
|
3166
|
+
* **mr**: Marathi \
|
|
3167
|
+
* **mn**: Mongolian (Cyrillic) \
|
|
3168
|
+
* **cnr-cyrl**: Montenegrin (Cyrillic) \
|
|
3169
|
+
* **cnr-latn**: Montenegrin (Latin) \
|
|
3170
|
+
* **nap**: Neapolitan \
|
|
3171
|
+
* **ne**: Nepali \
|
|
3172
|
+
* **niu**: Niuean \
|
|
3173
|
+
* **nog**: Nogay \
|
|
3174
|
+
* **sme**: Northern Sami (Latin) \
|
|
3175
|
+
* **nb**: Norwegian \
|
|
3176
|
+
* **no**: Norwegian \
|
|
3177
|
+
* **oc**: Occitan \
|
|
3178
|
+
* **os**: Ossetic \
|
|
3179
|
+
* **ps**: Pashto \
|
|
3180
|
+
* **fa**: Persian \
|
|
3181
|
+
* **pl**: Polish \
|
|
3182
|
+
* **pt**: Portuguese \
|
|
3183
|
+
* **pa**: Punjabi (Arabic) \
|
|
3184
|
+
* **ksh**: Ripuarian \
|
|
3185
|
+
* **ro**: Romanian \
|
|
3186
|
+
* **rm**: Romansh \
|
|
3187
|
+
* **ru**: Russian \
|
|
3188
|
+
* **sck**: Sadri (Devanagiri) \
|
|
3189
|
+
* **sm**: Samoan (Latin) \
|
|
3190
|
+
* **sa**: Sanskrit (Devanagiri) \
|
|
3191
|
+
* **sat**: Santali (Devanagiri) \
|
|
3192
|
+
* **sco**: Scots \
|
|
3193
|
+
* **gd**: Scottish Gaelic \
|
|
3194
|
+
* **sr**: Serbian (Latin) \
|
|
3195
|
+
* **sr-Cyrl**: Serbian (Cyrillic) \
|
|
3196
|
+
* **sr-Latn**: Serbian (Latin) \
|
|
3197
|
+
* **xsr**: Sherpa (Devanagiri) \
|
|
3198
|
+
* **srx**: Sirmauri (Devanagiri) \
|
|
3199
|
+
* **sms**: Skolt Sami \
|
|
3200
|
+
* **sk**: Slovak \
|
|
3201
|
+
* **sl**: Slovenian \
|
|
3202
|
+
* **so**: Somali (Arabic) \
|
|
3203
|
+
* **sma**: Southern Sami \
|
|
3204
|
+
* **es**: Spanish \
|
|
3205
|
+
* **sw**: Swahili (Latin) \
|
|
3206
|
+
* **sv**: Swedish \
|
|
3207
|
+
* **tg**: Tajik (Cyrillic) \
|
|
3208
|
+
* **tt**: Tatar (Latin) \
|
|
3209
|
+
* **tet**: Tetum \
|
|
3210
|
+
* **thf**: Thangmi \
|
|
3211
|
+
* **to**: Tongan \
|
|
3212
|
+
* **tr**: Turkish \
|
|
3213
|
+
* **tk**: Turkmen (Latin) \
|
|
3214
|
+
* **tyv**: Tuvan \
|
|
3215
|
+
* **hsb**: Upper Sorbian \
|
|
3216
|
+
* **ur**: Urdu \
|
|
3217
|
+
* **ug**: Uyghur (Arabic) \
|
|
3218
|
+
* **uz-arab**: Uzbek (Arabic) \
|
|
3219
|
+
* **uz-cyrl**: Uzbek (Cyrillic) \
|
|
3220
|
+
* **uz**: Uzbek (Latin) \
|
|
3221
|
+
* **vo**: Volapük \
|
|
3222
|
+
* **wae**: Walser \
|
|
3223
|
+
* **cy**: Welsh \
|
|
3224
|
+
* **fy**: Western Frisian \
|
|
3225
|
+
* **yua**: Yucatec Maya \
|
|
3226
|
+
* **za**: Zhuang \
|
|
3227
|
+
* **zu**: Zulu \
|
|
3228
|
+
* **unk**: Unknown (All)
|
|
3229
|
+
*/
|
|
3230
|
+
export type OcrSkillLanguage = string;
|
|
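The Known*/string-alias pairing above is the SDK's extensible-enum pattern: the `declare enum` names only the values documented at build time, while the companion type alias stays `string` so callers can pass values the service adds later. A minimal sketch of the pattern, assuming `KnownOcrSkillLanguage` is re-exported from the package root like the other `Known*` enums:

import { KnownOcrSkillLanguage } from "@azure/search-documents";

// OcrSkillLanguage is an alias for `string`, so both forms type-check:
const known: string = KnownOcrSkillLanguage.Fr; // "fr", discoverable via autocomplete
const raw: string = "fr";                       // identical at runtime
// A language the enum does not list yet is still accepted by the type system:
const notYetKnown: string = "xx-hypothetical";  // hypothetical value, for illustration only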
/** Known values of {@link OcrLineEnding} that the service accepts. */
export declare enum KnownOcrLineEnding {
    /** Lines are separated by a single space character. */
    Space = "space",
    /** Lines are separated by a carriage return ('\r') character. */
    CarriageReturn = "carriageReturn",
    /** Lines are separated by a single line feed ('\n') character. */
    LineFeed = "lineFeed",
    /** Lines are separated by a carriage return and a line feed ('\r\n') character. */
    CarriageReturnLineFeed = "carriageReturnLineFeed"
}
/**
 * Defines values for OcrLineEnding. \
 * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **space**: Lines are separated by a single space character. \
 * **carriageReturn**: Lines are separated by a carriage return ('\r') character. \
 * **lineFeed**: Lines are separated by a single line feed ('\n') character. \
 * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\r\n') character.
 */
export type OcrLineEnding = string;
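As an illustration of where these two value types plug in, here is a hedged sketch of an OCR skill definition; the `odatatype` discriminator and the property names (`defaultLanguageCode`, `lineEnding`, `inputs`, `outputs`) are assumptions drawn from the SDK's OcrSkill model rather than anything shown in this diff:

import { KnownOcrLineEnding, KnownOcrSkillLanguage } from "@azure/search-documents";

// Sketch only: property names assumed from the OcrSkill model.
const ocrSkill = {
    odatatype: "#Microsoft.Skills.Vision.OcrSkill" as const,
    defaultLanguageCode: KnownOcrSkillLanguage.En,         // "en"
    lineEnding: KnownOcrLineEnding.CarriageReturnLineFeed, // join extracted lines with "\r\n"
    inputs: [{ name: "image", source: "/document/normalized_images/*" }],
    outputs: [{ name: "text", targetName: "text" }],
};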
/** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. */
export declare enum KnownImageAnalysisSkillLanguage {
    /** Arabic */
    Ar = "ar",
    /** Azerbaijani */
    Az = "az",
    /** Bulgarian */
    Bg = "bg",
    /** Bosnian Latin */
    Bs = "bs",
    /** Catalan */
    Ca = "ca",
    /** Czech */
    Cs = "cs",
    /** Welsh */
    Cy = "cy",
    /** Danish */
    Da = "da",
    /** German */
    De = "de",
    /** Greek */
    El = "el",
    /** English */
    En = "en",
    /** Spanish */
    Es = "es",
    /** Estonian */
    Et = "et",
    /** Basque */
    Eu = "eu",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** Irish */
    Ga = "ga",
    /** Galician */
    Gl = "gl",
    /** Hebrew */
    He = "he",
    /** Hindi */
    Hi = "hi",
    /** Croatian */
    Hr = "hr",
    /** Hungarian */
    Hu = "hu",
    /** Indonesian */
    Id = "id",
    /** Italian */
    It = "it",
    /** Japanese */
    Ja = "ja",
    /** Kazakh */
    Kk = "kk",
    /** Korean */
    Ko = "ko",
    /** Lithuanian */
    Lt = "lt",
    /** Latvian */
    Lv = "lv",
    /** Macedonian */
    Mk = "mk",
    /** Malay Malaysia */
    Ms = "ms",
    /** Norwegian (Bokmal) */
    Nb = "nb",
    /** Dutch */
    Nl = "nl",
    /** Polish */
    Pl = "pl",
    /** Dari */
    Prs = "prs",
    /** Portuguese-Brazil */
    PtBR = "pt-BR",
    /** Portuguese-Portugal */
    Pt = "pt",
    /** Portuguese-Portugal */
    PtPT = "pt-PT",
    /** Romanian */
    Ro = "ro",
    /** Russian */
    Ru = "ru",
    /** Slovak */
    Sk = "sk",
    /** Slovenian */
    Sl = "sl",
    /** Serbian - Cyrillic RS */
    SrCyrl = "sr-Cyrl",
    /** Serbian - Latin RS */
    SrLatn = "sr-Latn",
    /** Swedish */
    Sv = "sv",
    /** Thai */
    Th = "th",
    /** Turkish */
    Tr = "tr",
    /** Ukrainian */
    Uk = "uk",
    /** Vietnamese */
    Vi = "vi",
    /** Chinese Simplified */
    Zh = "zh",
    /** Chinese Simplified */
    ZhHans = "zh-Hans",
    /** Chinese Traditional */
    ZhHant = "zh-Hant"
}
/**
 * Defines values for ImageAnalysisSkillLanguage. \
 * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with ImageAnalysisSkillLanguage,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **ar**: Arabic \
 * **az**: Azerbaijani \
 * **bg**: Bulgarian \
 * **bs**: Bosnian Latin \
 * **ca**: Catalan \
 * **cs**: Czech \
 * **cy**: Welsh \
 * **da**: Danish \
 * **de**: German \
 * **el**: Greek \
 * **en**: English \
 * **es**: Spanish \
 * **et**: Estonian \
 * **eu**: Basque \
 * **fi**: Finnish \
 * **fr**: French \
 * **ga**: Irish \
 * **gl**: Galician \
 * **he**: Hebrew \
 * **hi**: Hindi \
 * **hr**: Croatian \
 * **hu**: Hungarian \
 * **id**: Indonesian \
 * **it**: Italian \
 * **ja**: Japanese \
 * **kk**: Kazakh \
 * **ko**: Korean \
 * **lt**: Lithuanian \
 * **lv**: Latvian \
 * **mk**: Macedonian \
 * **ms**: Malay Malaysia \
 * **nb**: Norwegian (Bokmal) \
 * **nl**: Dutch \
 * **pl**: Polish \
 * **prs**: Dari \
 * **pt-BR**: Portuguese-Brazil \
 * **pt**: Portuguese-Portugal \
 * **pt-PT**: Portuguese-Portugal \
 * **ro**: Romanian \
 * **ru**: Russian \
 * **sk**: Slovak \
 * **sl**: Slovenian \
 * **sr-Cyrl**: Serbian - Cyrillic RS \
 * **sr-Latn**: Serbian - Latin RS \
 * **sv**: Swedish \
 * **th**: Thai \
 * **tr**: Turkish \
 * **uk**: Ukrainian \
 * **vi**: Vietnamese \
 * **zh**: Chinese Simplified \
 * **zh-Hans**: Chinese Simplified \
 * **zh-Hant**: Chinese Traditional
 */
export type ImageAnalysisSkillLanguage = string;
/** Known values of {@link VisualFeature} that the service accepts. */
export declare enum KnownVisualFeature {
    /** Visual features recognized as adult persons. */
    Adult = "adult",
    /** Visual features recognized as commercial brands. */
    Brands = "brands",
    /** Categories. */
    Categories = "categories",
    /** Description. */
    Description = "description",
    /** Visual features recognized as people faces. */
    Faces = "faces",
    /** Visual features recognized as objects. */
    Objects = "objects",
    /** Tags. */
    Tags = "tags"
}
/**
 * Defines values for VisualFeature. \
 * {@link KnownVisualFeature} can be used interchangeably with VisualFeature,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **adult**: Visual features recognized as adult persons. \
 * **brands**: Visual features recognized as commercial brands. \
 * **categories**: Categories. \
 * **description**: Description. \
 * **faces**: Visual features recognized as people faces. \
 * **objects**: Visual features recognized as objects. \
 * **tags**: Tags.
 */
export type VisualFeature = string;
/** Known values of {@link ImageDetail} that the service accepts. */
export declare enum KnownImageDetail {
    /** Details recognized as celebrities. */
    Celebrities = "celebrities",
    /** Details recognized as landmarks. */
    Landmarks = "landmarks"
}
/**
 * Defines values for ImageDetail. \
 * {@link KnownImageDetail} can be used interchangeably with ImageDetail,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **celebrities**: Details recognized as celebrities. \
 * **landmarks**: Details recognized as landmarks.
 */
export type ImageDetail = string;
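These three image types travel together on the image-analysis skill: a language, the visual features to compute, and the optional detail domains. A minimal sketch, assuming the ImageAnalysisSkill model's `visualFeatures`/`details` property names and discriminator:

import {
    KnownImageAnalysisSkillLanguage,
    KnownImageDetail,
    KnownVisualFeature,
} from "@azure/search-documents";

// Sketch only: property names assumed from the ImageAnalysisSkill model.
const imageAnalysisSkill = {
    odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill" as const,
    defaultLanguageCode: KnownImageAnalysisSkillLanguage.En,
    visualFeatures: [KnownVisualFeature.Tags, KnownVisualFeature.Description],
    details: [KnownImageDetail.Landmarks],
    inputs: [{ name: "image", source: "/document/normalized_images/*" }],
    outputs: [{ name: "tags" }, { name: "description" }],
};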
/** Known values of {@link EntityCategory} that the service accepts. */
export declare enum KnownEntityCategory {
    /** Entities describing a physical location. */
    Location = "location",
    /** Entities describing an organization. */
    Organization = "organization",
    /** Entities describing a person. */
    Person = "person",
    /** Entities describing a quantity. */
    Quantity = "quantity",
    /** Entities describing a date and time. */
    Datetime = "datetime",
    /** Entities describing a URL. */
    Url = "url",
    /** Entities describing an email address. */
    Email = "email"
}
/**
 * Defines values for EntityCategory. \
 * {@link KnownEntityCategory} can be used interchangeably with EntityCategory,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **location**: Entities describing a physical location. \
 * **organization**: Entities describing an organization. \
 * **person**: Entities describing a person. \
 * **quantity**: Entities describing a quantity. \
 * **datetime**: Entities describing a date and time. \
 * **url**: Entities describing a URL. \
 * **email**: Entities describing an email address.
 */
export type EntityCategory = string;
/** Known values of {@link EntityRecognitionSkillLanguage} that the service accepts. */
export declare enum KnownEntityRecognitionSkillLanguage {
    /** Arabic */
    Ar = "ar",
    /** Czech */
    Cs = "cs",
    /** Chinese-Simplified */
    ZhHans = "zh-Hans",
    /** Chinese-Traditional */
    ZhHant = "zh-Hant",
    /** Danish */
    Da = "da",
    /** Dutch */
    Nl = "nl",
    /** English */
    En = "en",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** German */
    De = "de",
    /** Greek */
    El = "el",
    /** Hungarian */
    Hu = "hu",
    /** Italian */
    It = "it",
    /** Japanese */
    Ja = "ja",
    /** Korean */
    Ko = "ko",
    /** Norwegian (Bokmaal) */
    No = "no",
    /** Polish */
    Pl = "pl",
    /** Portuguese (Portugal) */
    PtPT = "pt-PT",
    /** Portuguese (Brazil) */
    PtBR = "pt-BR",
    /** Russian */
    Ru = "ru",
    /** Spanish */
    Es = "es",
    /** Swedish */
    Sv = "sv",
    /** Turkish */
    Tr = "tr"
}
/**
 * Defines values for EntityRecognitionSkillLanguage. \
 * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **ar**: Arabic \
 * **cs**: Czech \
 * **zh-Hans**: Chinese-Simplified \
 * **zh-Hant**: Chinese-Traditional \
 * **da**: Danish \
 * **nl**: Dutch \
 * **en**: English \
 * **fi**: Finnish \
 * **fr**: French \
 * **de**: German \
 * **el**: Greek \
 * **hu**: Hungarian \
 * **it**: Italian \
 * **ja**: Japanese \
 * **ko**: Korean \
 * **no**: Norwegian (Bokmaal) \
 * **pl**: Polish \
 * **pt-PT**: Portuguese (Portugal) \
 * **pt-BR**: Portuguese (Brazil) \
 * **ru**: Russian \
 * **es**: Spanish \
 * **sv**: Swedish \
 * **tr**: Turkish
 */
export type EntityRecognitionSkillLanguage = string;
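Entity categories and the skill language pair up on the entity-recognition skill. A hedged sketch, assuming the EntityRecognitionSkill model's `categories` property and discriminator string:

import {
    KnownEntityCategory,
    KnownEntityRecognitionSkillLanguage,
} from "@azure/search-documents";

// Sketch only: extract just person and organization entities from document content.
const entitySkill = {
    odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill" as const,
    categories: [KnownEntityCategory.Person, KnownEntityCategory.Organization],
    defaultLanguageCode: KnownEntityRecognitionSkillLanguage.En,
    inputs: [{ name: "text", source: "/document/content" }],
    outputs: [{ name: "persons" }, { name: "organizations" }],
};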
/** Known values of {@link SentimentSkillLanguage} that the service accepts. */
export declare enum KnownSentimentSkillLanguage {
    /** Danish */
    Da = "da",
    /** Dutch */
    Nl = "nl",
    /** English */
    En = "en",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** German */
    De = "de",
    /** Greek */
    El = "el",
    /** Italian */
    It = "it",
    /** Norwegian (Bokmaal) */
    No = "no",
    /** Polish */
    Pl = "pl",
    /** Portuguese (Portugal) */
    PtPT = "pt-PT",
    /** Russian */
    Ru = "ru",
    /** Spanish */
    Es = "es",
    /** Swedish */
    Sv = "sv",
    /** Turkish */
    Tr = "tr"
}
/**
 * Defines values for SentimentSkillLanguage. \
 * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **da**: Danish \
 * **nl**: Dutch \
 * **en**: English \
 * **fi**: Finnish \
 * **fr**: French \
 * **de**: German \
 * **el**: Greek \
 * **it**: Italian \
 * **no**: Norwegian (Bokmaal) \
 * **pl**: Polish \
 * **pt-PT**: Portuguese (Portugal) \
 * **ru**: Russian \
 * **es**: Spanish \
 * **sv**: Swedish \
 * **tr**: Turkish
 */
export type SentimentSkillLanguage = string;
/** Known values of {@link PIIDetectionSkillMaskingMode} that the service accepts. */
export declare enum KnownPIIDetectionSkillMaskingMode {
    /** No masking occurs and the maskedText output will not be returned. */
    None = "none",
    /** Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. */
    Replace = "replace"
}
/**
 * Defines values for PIIDetectionSkillMaskingMode. \
 * {@link KnownPIIDetectionSkillMaskingMode} can be used interchangeably with PIIDetectionSkillMaskingMode,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **none**: No masking occurs and the maskedText output will not be returned. \
 * **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText.
 */
export type PIIDetectionSkillMaskingMode = string;
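The masking mode only has a visible effect together with a masking character. A minimal sketch, assuming the PIIDetectionSkill model's `maskingMode`/`maskingCharacter` property names:

import { KnownPIIDetectionSkillMaskingMode } from "@azure/search-documents";

// Sketch only: redact detected PII, preserving offsets by repeating "*"
// to the length of each detected entity (per the doc comment above).
const piiSkill = {
    odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill" as const,
    maskingMode: KnownPIIDetectionSkillMaskingMode.Replace,
    maskingCharacter: "*",
    inputs: [{ name: "text", source: "/document/content" }],
    outputs: [{ name: "maskedText", targetName: "redactedContent" }],
};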
/** Known values of {@link SplitSkillLanguage} that the service accepts. */
export declare enum KnownSplitSkillLanguage {
    /** Amharic */
    Am = "am",
    /** Bosnian */
    Bs = "bs",
    /** Czech */
    Cs = "cs",
    /** Danish */
    Da = "da",
    /** German */
    De = "de",
    /** English */
    En = "en",
    /** Spanish */
    Es = "es",
    /** Estonian */
    Et = "et",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** Hebrew */
    He = "he",
    /** Hindi */
    Hi = "hi",
    /** Croatian */
    Hr = "hr",
    /** Hungarian */
    Hu = "hu",
    /** Indonesian */
    Id = "id",
    /** Icelandic */
    Is = "is",
    /** Italian */
    It = "it",
    /** Japanese */
    Ja = "ja",
    /** Korean */
    Ko = "ko",
    /** Latvian */
    Lv = "lv",
    /** Norwegian */
    Nb = "nb",
    /** Dutch */
    Nl = "nl",
    /** Polish */
    Pl = "pl",
    /** Portuguese (Portugal) */
    Pt = "pt",
    /** Portuguese (Brazil) */
    PtBr = "pt-br",
    /** Russian */
    Ru = "ru",
    /** Slovak */
    Sk = "sk",
    /** Slovenian */
    Sl = "sl",
    /** Serbian */
    Sr = "sr",
    /** Swedish */
    Sv = "sv",
    /** Turkish */
    Tr = "tr",
    /** Urdu */
    Ur = "ur",
    /** Chinese (Simplified) */
    Zh = "zh"
}
/**
 * Defines values for SplitSkillLanguage. \
 * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **am**: Amharic \
 * **bs**: Bosnian \
 * **cs**: Czech \
 * **da**: Danish \
 * **de**: German \
 * **en**: English \
 * **es**: Spanish \
 * **et**: Estonian \
 * **fi**: Finnish \
 * **fr**: French \
 * **he**: Hebrew \
 * **hi**: Hindi \
 * **hr**: Croatian \
 * **hu**: Hungarian \
 * **id**: Indonesian \
 * **is**: Icelandic \
 * **it**: Italian \
 * **ja**: Japanese \
 * **ko**: Korean \
 * **lv**: Latvian \
 * **nb**: Norwegian \
 * **nl**: Dutch \
 * **pl**: Polish \
 * **pt**: Portuguese (Portugal) \
 * **pt-br**: Portuguese (Brazil) \
 * **ru**: Russian \
 * **sk**: Slovak \
 * **sl**: Slovenian \
 * **sr**: Serbian \
 * **sv**: Swedish \
 * **tr**: Turkish \
 * **ur**: Urdu \
 * **zh**: Chinese (Simplified)
 */
export type SplitSkillLanguage = string;
/** Known values of {@link TextSplitMode} that the service accepts. */
export declare enum KnownTextSplitMode {
    /** Split the text into individual pages. */
    Pages = "pages",
    /** Split the text into individual sentences. */
    Sentences = "sentences"
}
/**
 * Defines values for TextSplitMode. \
 * {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **pages**: Split the text into individual pages. \
 * **sentences**: Split the text into individual sentences.
 */
export type TextSplitMode = string;
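A split-skill sketch tying the two types together; `maxPageLength` is an assumed property name from the SDK's SplitSkill model, not something this diff shows:

import { KnownSplitSkillLanguage, KnownTextSplitMode } from "@azure/search-documents";

// Sketch only: chunk long document content into page-sized pieces.
const splitSkill = {
    odatatype: "#Microsoft.Skills.Text.SplitSkill" as const,
    textSplitMode: KnownTextSplitMode.Pages,
    maxPageLength: 2000, // assumed property name; approximate characters per chunk
    defaultLanguageCode: KnownSplitSkillLanguage.En,
    inputs: [{ name: "text", source: "/document/content" }],
    outputs: [{ name: "textItems", targetName: "pages" }],
};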
/** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */
export declare enum KnownCustomEntityLookupSkillLanguage {
    /** Danish */
    Da = "da",
    /** German */
    De = "de",
    /** English */
    En = "en",
    /** Spanish */
    Es = "es",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** Italian */
    It = "it",
    /** Korean */
    Ko = "ko",
    /** Portuguese */
    Pt = "pt"
}
/**
 * Defines values for CustomEntityLookupSkillLanguage. \
 * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **da**: Danish \
 * **de**: German \
 * **en**: English \
 * **es**: Spanish \
 * **fi**: Finnish \
 * **fr**: French \
 * **it**: Italian \
 * **ko**: Korean \
 * **pt**: Portuguese
 */
export type CustomEntityLookupSkillLanguage = string;
/** Known values of {@link TextTranslationSkillLanguage} that the service accepts. */
export declare enum KnownTextTranslationSkillLanguage {
    /** Afrikaans */
    Af = "af",
    /** Arabic */
    Ar = "ar",
    /** Bangla */
    Bn = "bn",
    /** Bosnian (Latin) */
    Bs = "bs",
    /** Bulgarian */
    Bg = "bg",
    /** Cantonese (Traditional) */
    Yue = "yue",
    /** Catalan */
    Ca = "ca",
    /** Chinese Simplified */
    ZhHans = "zh-Hans",
    /** Chinese Traditional */
    ZhHant = "zh-Hant",
    /** Croatian */
    Hr = "hr",
    /** Czech */
    Cs = "cs",
    /** Danish */
    Da = "da",
    /** Dutch */
    Nl = "nl",
    /** English */
    En = "en",
    /** Estonian */
    Et = "et",
    /** Fijian */
    Fj = "fj",
    /** Filipino */
    Fil = "fil",
    /** Finnish */
    Fi = "fi",
    /** French */
    Fr = "fr",
    /** German */
    De = "de",
    /** Greek */
    El = "el",
    /** Haitian Creole */
    Ht = "ht",
    /** Hebrew */
    He = "he",
    /** Hindi */
    Hi = "hi",
    /** Hmong Daw */
    Mww = "mww",
    /** Hungarian */
    Hu = "hu",
    /** Icelandic */
    Is = "is",
    /** Indonesian */
    Id = "id",
    /** Italian */
    It = "it",
    /** Japanese */
    Ja = "ja",
    /** Kiswahili */
    Sw = "sw",
    /** Klingon */
    Tlh = "tlh",
    /** Klingon (Latin script) */
    TlhLatn = "tlh-Latn",
    /** Klingon (Klingon script) */
    TlhPiqd = "tlh-Piqd",
    /** Korean */
    Ko = "ko",
    /** Latvian */
    Lv = "lv",
    /** Lithuanian */
    Lt = "lt",
    /** Malagasy */
    Mg = "mg",
    /** Malay */
    Ms = "ms",
    /** Maltese */
    Mt = "mt",
    /** Norwegian */
    Nb = "nb",
    /** Persian */
    Fa = "fa",
    /** Polish */
    Pl = "pl",
    /** Portuguese */
    Pt = "pt",
    /** Portuguese (Brazil) */
    PtBr = "pt-br",
    /** Portuguese (Portugal) */
    PtPT = "pt-PT",
    /** Queretaro Otomi */
    Otq = "otq",
    /** Romanian */
    Ro = "ro",
    /** Russian */
    Ru = "ru",
    /** Samoan */
    Sm = "sm",
    /** Serbian (Cyrillic) */
    SrCyrl = "sr-Cyrl",
    /** Serbian (Latin) */
    SrLatn = "sr-Latn",
    /** Slovak */
    Sk = "sk",
    /** Slovenian */
    Sl = "sl",
    /** Spanish */
    Es = "es",
    /** Swedish */
    Sv = "sv",
    /** Tahitian */
    Ty = "ty",
    /** Tamil */
    Ta = "ta",
    /** Telugu */
    Te = "te",
    /** Thai */
    Th = "th",
    /** Tongan */
    To = "to",
    /** Turkish */
    Tr = "tr",
    /** Ukrainian */
    Uk = "uk",
    /** Urdu */
    Ur = "ur",
    /** Vietnamese */
    Vi = "vi",
    /** Welsh */
    Cy = "cy",
    /** Yucatec Maya */
    Yua = "yua",
    /** Irish */
    Ga = "ga",
    /** Kannada */
    Kn = "kn",
    /** Maori */
    Mi = "mi",
    /** Malayalam */
    Ml = "ml",
    /** Punjabi */
    Pa = "pa"
}
/**
 * Defines values for TextTranslationSkillLanguage. \
 * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **af**: Afrikaans \
 * **ar**: Arabic \
 * **bn**: Bangla \
 * **bs**: Bosnian (Latin) \
 * **bg**: Bulgarian \
 * **yue**: Cantonese (Traditional) \
 * **ca**: Catalan \
 * **zh-Hans**: Chinese Simplified \
 * **zh-Hant**: Chinese Traditional \
 * **hr**: Croatian \
 * **cs**: Czech \
 * **da**: Danish \
 * **nl**: Dutch \
 * **en**: English \
 * **et**: Estonian \
 * **fj**: Fijian \
 * **fil**: Filipino \
 * **fi**: Finnish \
 * **fr**: French \
 * **de**: German \
 * **el**: Greek \
 * **ht**: Haitian Creole \
 * **he**: Hebrew \
 * **hi**: Hindi \
 * **mww**: Hmong Daw \
 * **hu**: Hungarian \
 * **is**: Icelandic \
 * **id**: Indonesian \
 * **it**: Italian \
 * **ja**: Japanese \
 * **sw**: Kiswahili \
 * **tlh**: Klingon \
 * **tlh-Latn**: Klingon (Latin script) \
 * **tlh-Piqd**: Klingon (Klingon script) \
 * **ko**: Korean \
 * **lv**: Latvian \
 * **lt**: Lithuanian \
 * **mg**: Malagasy \
 * **ms**: Malay \
 * **mt**: Maltese \
 * **nb**: Norwegian \
 * **fa**: Persian \
 * **pl**: Polish \
 * **pt**: Portuguese \
 * **pt-br**: Portuguese (Brazil) \
 * **pt-PT**: Portuguese (Portugal) \
 * **otq**: Queretaro Otomi \
 * **ro**: Romanian \
 * **ru**: Russian \
 * **sm**: Samoan \
 * **sr-Cyrl**: Serbian (Cyrillic) \
 * **sr-Latn**: Serbian (Latin) \
 * **sk**: Slovak \
 * **sl**: Slovenian \
 * **es**: Spanish \
 * **sv**: Swedish \
 * **ty**: Tahitian \
 * **ta**: Tamil \
 * **te**: Telugu \
 * **th**: Thai \
 * **to**: Tongan \
 * **tr**: Turkish \
 * **uk**: Ukrainian \
 * **ur**: Urdu \
 * **vi**: Vietnamese \
 * **cy**: Welsh \
 * **yua**: Yucatec Maya \
 * **ga**: Irish \
 * **kn**: Kannada \
 * **mi**: Maori \
 * **ml**: Malayalam \
 * **pa**: Punjabi
 */
export type TextTranslationSkillLanguage = string;
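A translation-skill sketch; the `defaultToLanguageCode`/`defaultFromLanguageCode` names are assumptions from the SDK's TextTranslationSkill model:

import { KnownTextTranslationSkillLanguage } from "@azure/search-documents";

// Sketch only: translate French source content into English.
const translationSkill = {
    odatatype: "#Microsoft.Skills.Text.TranslationSkill" as const,
    defaultToLanguageCode: KnownTextTranslationSkillLanguage.En,
    defaultFromLanguageCode: KnownTextTranslationSkillLanguage.Fr,
    inputs: [{ name: "text", source: "/document/content" }],
    outputs: [{ name: "translatedText" }],
};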
/** Known values of {@link DocumentIntelligenceLayoutSkillOutputFormat} that the service accepts. */
export declare enum KnownDocumentIntelligenceLayoutSkillOutputFormat {
    /** Specify the format of the output as text. */
    Text = "text",
    /** Specify the format of the output as markdown. */
    Markdown = "markdown"
}
/**
 * Defines values for DocumentIntelligenceLayoutSkillOutputFormat. \
 * {@link KnownDocumentIntelligenceLayoutSkillOutputFormat} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputFormat,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **text**: Specify the format of the output as text. \
 * **markdown**: Specify the format of the output as markdown.
 */
export type DocumentIntelligenceLayoutSkillOutputFormat = string;
/** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */
export declare enum KnownDocumentIntelligenceLayoutSkillOutputMode {
    /** Specify that the output should be parsed as 'oneToMany'. */
    OneToMany = "oneToMany"
}
/**
 * Defines values for DocumentIntelligenceLayoutSkillOutputMode. \
 * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **oneToMany**: Specify that the output should be parsed as 'oneToMany'.
 */
export type DocumentIntelligenceLayoutSkillOutputMode = string;
/** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */
export declare enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth {
    /** Header level 1. */
    H1 = "h1",
    /** Header level 2. */
    H2 = "h2",
    /** Header level 3. */
    H3 = "h3",
    /** Header level 4. */
    H4 = "h4",
    /** Header level 5. */
    H5 = "h5",
    /** Header level 6. */
    H6 = "h6"
}
/**
 * Defines values for DocumentIntelligenceLayoutSkillMarkdownHeaderDepth. \
 * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **h1**: Header level 1. \
 * **h2**: Header level 2. \
 * **h3**: Header level 3. \
 * **h4**: Header level 4. \
 * **h5**: Header level 5. \
 * **h6**: Header level 6.
 */
export type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string;
/** Known values of {@link DocumentIntelligenceLayoutSkillExtractionOptions} that the service accepts. */
export declare enum KnownDocumentIntelligenceLayoutSkillExtractionOptions {
    /** Specify that image content should be extracted from the document. */
    Images = "images",
    /** Specify that location metadata should be extracted from the document. */
    LocationMetadata = "locationMetadata"
}
/**
 * Defines values for DocumentIntelligenceLayoutSkillExtractionOptions. \
 * {@link KnownDocumentIntelligenceLayoutSkillExtractionOptions} can be used interchangeably with DocumentIntelligenceLayoutSkillExtractionOptions,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **images**: Specify that image content should be extracted from the document. \
 * **locationMetadata**: Specify that location metadata should be extracted from the document.
 */
export type DocumentIntelligenceLayoutSkillExtractionOptions = string;
/** Known values of {@link DocumentIntelligenceLayoutSkillChunkingUnit} that the service accepts. */
export declare enum KnownDocumentIntelligenceLayoutSkillChunkingUnit {
    /** Specifies chunk by characters. */
    Characters = "characters"
}
/**
 * Defines values for DocumentIntelligenceLayoutSkillChunkingUnit. \
 * {@link KnownDocumentIntelligenceLayoutSkillChunkingUnit} can be used interchangeably with DocumentIntelligenceLayoutSkillChunkingUnit,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **characters**: Specifies chunk by characters.
 */
export type DocumentIntelligenceLayoutSkillChunkingUnit = string;
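The five DocumentIntelligenceLayoutSkill* types above are settings on a single layout skill. The sketch below is speculative: the discriminator string and every property, input, and output name are assumptions, since this diff only shows the value types themselves:

import {
    KnownDocumentIntelligenceLayoutSkillExtractionOptions,
    KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth,
    KnownDocumentIntelligenceLayoutSkillOutputFormat,
    KnownDocumentIntelligenceLayoutSkillOutputMode,
} from "@azure/search-documents";

// Speculative sketch: all names below are assumed, not confirmed by this diff.
const layoutSkill = {
    odatatype: "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" as const,
    outputFormat: KnownDocumentIntelligenceLayoutSkillOutputFormat.Markdown,
    outputMode: KnownDocumentIntelligenceLayoutSkillOutputMode.OneToMany,
    markdownHeaderDepth: KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth.H3,
    extractionOptions: [KnownDocumentIntelligenceLayoutSkillExtractionOptions.Images],
    inputs: [{ name: "file_data", source: "/document/file_data" }],
    outputs: [{ name: "markdown_document" }],
};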
/** Known values of {@link LexicalTokenizerName} that the service accepts. */
export declare enum KnownLexicalTokenizerName {
    /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
    Classic = "classic",
    /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
    EdgeNGram = "edgeNGram",
    /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
    Keyword = "keyword_v2",
    /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
    Letter = "letter",
    /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
    Lowercase = "lowercase",
    /** Divides text using language-specific rules. */
    MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
    /** Divides text using language-specific rules and reduces words to their base forms. */
    MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
    /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
    NGram = "nGram",
    /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
    PathHierarchy = "path_hierarchy_v2",
    /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
    Pattern = "pattern",
    /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
    Standard = "standard_v2",
    /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
    UaxUrlEmail = "uax_url_email",
    /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
    Whitespace = "whitespace"
}
/**
 * Defines values for LexicalTokenizerName. \
 * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html \
 * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html \
 * **keyword_v2**: Emits the entire input as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html \
 * **letter**: Divides text at non-letters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html \
 * **lowercase**: Divides text at non-letters and converts them to lower case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html \
 * **microsoft_language_tokenizer**: Divides text using language-specific rules. \
 * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \
 * **nGram**: Tokenizes the input into n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html \
 * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html \
 * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html \
 * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html \
 * **uax_url_email**: Tokenizes urls and emails as one token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html \
 * **whitespace**: Divides text at whitespace. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html
 */
export type LexicalTokenizerName = string;
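Tokenizer names are referenced by custom analyzers in an index definition. A minimal sketch, assuming the CustomAnalyzer model's `tokenizerName` property and discriminator; the analyzer name is hypothetical:

import { KnownLexicalTokenizerName } from "@azure/search-documents";

// Sketch only: an analyzer that keeps URLs and emails as single tokens.
const urlAnalyzer = {
    odatatype: "#Microsoft.Azure.Search.CustomAnalyzer" as const,
    name: "my_url_analyzer", // hypothetical name
    tokenizerName: KnownLexicalTokenizerName.UaxUrlEmail, // "uax_url_email"
    tokenFilters: ["lowercase"],
};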
/** Known values of {@link RegexFlags} that the service accepts. */
export declare enum KnownRegexFlags {
    /** Enables canonical equivalence. */
    CanonEq = "CANON_EQ",
    /** Enables case-insensitive matching. */
    CaseInsensitive = "CASE_INSENSITIVE",
    /** Permits whitespace and comments in the pattern. */
    Comments = "COMMENTS",
    /** Enables dotall mode. */
    DotAll = "DOTALL",
    /** Enables literal parsing of the pattern. */
    Literal = "LITERAL",
    /** Enables multiline mode. */
    Multiline = "MULTILINE",
    /** Enables Unicode-aware case folding. */
    UnicodeCase = "UNICODE_CASE",
    /** Enables Unix lines mode. */
    UnixLines = "UNIX_LINES"
}
/**
 * Defines values for RegexFlags. \
 * {@link KnownRegexFlags} can be used interchangeably with RegexFlags,
 * this enum contains the known values that the service supports.
 * ### Known values supported by the service
 * **CANON_EQ**: Enables canonical equivalence. \
 * **CASE_INSENSITIVE**: Enables case-insensitive matching. \
 * **COMMENTS**: Permits whitespace and comments in the pattern. \
 * **DOTALL**: Enables dotall mode. \
 * **LITERAL**: Enables literal parsing of the pattern. \
 * **MULTILINE**: Enables multiline mode. \
 * **UNICODE_CASE**: Enables Unicode-aware case folding. \
 * **UNIX_LINES**: Enables Unix lines mode.
 */
export type RegexFlags = string;
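These Java-style flag names feed the pattern analyzer and tokenizer. In the sketch below, modeling `flags` as a RegexFlags array on PatternAnalyzer is an assumption about the convenience layer, and the analyzer name is hypothetical:

import { KnownRegexFlags } from "@azure/search-documents";

// Speculative sketch: split on commas, matching case-insensitively.
const patternAnalyzer = {
    odatatype: "#Microsoft.Azure.Search.PatternAnalyzer" as const,
    name: "comma_separated", // hypothetical name
    pattern: ",",
    flags: [KnownRegexFlags.CaseInsensitive], // assumed RegexFlags[] shape
};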
/** Defines values for IndexerStatus. */
export type IndexerStatus = "unknown" | "error" | "running";
/** Defines values for IndexerExecutionStatus. */
export type IndexerExecutionStatus = "transientFailure" | "success" | "inProgress" | "reset";
/** Defines values for ScoringFunctionInterpolation. */
export type ScoringFunctionInterpolation = "linear" | "constant" | "quadratic" | "logarithmic";
/** Defines values for ScoringFunctionAggregation. */
export type ScoringFunctionAggregation = "sum" | "average" | "minimum" | "maximum" | "firstMatching";
/** Defines values for TokenCharacterKind. */
export type TokenCharacterKind = "letter" | "digit" | "whitespace" | "punctuation" | "symbol";
/** Defines values for MicrosoftTokenizerLanguage. */
export type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catalan" | "chineseSimplified" | "chineseTraditional" | "croatian" | "czech" | "danish" | "dutch" | "english" | "french" | "german" | "greek" | "gujarati" | "hindi" | "icelandic" | "indonesian" | "italian" | "japanese" | "kannada" | "korean" | "malay" | "malayalam" | "marathi" | "norwegianBokmaal" | "polish" | "portuguese" | "portugueseBrazilian" | "punjabi" | "romanian" | "russian" | "serbianCyrillic" | "serbianLatin" | "slovenian" | "spanish" | "swedish" | "tamil" | "telugu" | "thai" | "ukrainian" | "urdu" | "vietnamese";
/** Defines values for MicrosoftStemmingTokenizerLanguage. */
export type MicrosoftStemmingTokenizerLanguage = "arabic" | "bangla" | "bulgarian" | "catalan" | "croatian" | "czech" | "danish" | "dutch" | "english" | "estonian" | "finnish" | "french" | "german" | "greek" | "gujarati" | "hebrew" | "hindi" | "hungarian" | "icelandic" | "indonesian" | "italian" | "kannada" | "latvian" | "lithuanian" | "malay" | "malayalam" | "marathi" | "norwegianBokmaal" | "polish" | "portuguese" | "portugueseBrazilian" | "punjabi" | "romanian" | "russian" | "serbianCyrillic" | "serbianLatin" | "slovak" | "slovenian" | "spanish" | "swedish" | "tamil" | "telugu" | "turkish" | "ukrainian" | "urdu";
/** Defines values for CjkBigramTokenFilterScripts. */
export type CjkBigramTokenFilterScripts = "han" | "hiragana" | "katakana" | "hangul";
/** Defines values for EdgeNGramTokenFilterSide. */
export type EdgeNGramTokenFilterSide = "front" | "back";
/** Defines values for PhoneticEncoder. */
export type PhoneticEncoder = "metaphone" | "doubleMetaphone" | "soundex" | "refinedSoundex" | "caverphone1" | "caverphone2" | "cologne" | "nysiis" | "koelnerPhonetik" | "haasePhonetik" | "beiderMorse";
/** Defines values for SnowballTokenFilterLanguage. */
export type SnowballTokenFilterLanguage = "armenian" | "basque" | "catalan" | "danish" | "dutch" | "english" | "finnish" | "french" | "german" | "german2" | "hungarian" | "italian" | "kp" | "lovins" | "norwegian" | "porter" | "portuguese" | "romanian" | "russian" | "spanish" | "swedish" | "turkish";
/** Defines values for StemmerTokenFilterLanguage. */
export type StemmerTokenFilterLanguage = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "dutchKp" | "english" | "lightEnglish" | "minimalEnglish" | "possessiveEnglish" | "porter2" | "lovins" | "finnish" | "lightFinnish" | "french" | "lightFrench" | "minimalFrench" | "galician" | "minimalGalician" | "german" | "german2" | "lightGerman" | "minimalGerman" | "greek" | "hindi" | "hungarian" | "lightHungarian" | "indonesian" | "irish" | "italian" | "lightItalian" | "sorani" | "latvian" | "norwegian" | "lightNorwegian" | "minimalNorwegian" | "lightNynorsk" | "minimalNynorsk" | "portuguese" | "lightPortuguese" | "minimalPortuguese" | "portugueseRslp" | "romanian" | "russian" | "lightRussian" | "spanish" | "lightSpanish" | "swedish" | "lightSwedish" | "turkish";
/** Defines values for StopwordsList. */
export type StopwordsList = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "english" | "finnish" | "french" | "galician" | "german" | "greek" | "hindi" | "hungarian" | "indonesian" | "irish" | "italian" | "latvian" | "norwegian" | "persian" | "portuguese" | "romanian" | "russian" | "sorani" | "spanish" | "swedish" | "thai" | "turkish";
4230
|
+
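The string-literal unions above are how the typings constrain analyzer, tokenizer, and token-filter configuration at compile time. A minimal sketch, assuming these unions are re-exported from the package root as part of the public API surface; the variable names are illustrative only:

```ts
// Type-only imports: nothing here executes against a search service.
import type {
  PhoneticEncoder,
  ScoringFunctionAggregation,
  TokenCharacterKind,
} from "@azure/search-documents";

const aggregation: ScoringFunctionAggregation = "sum";
const tokenChars: TokenCharacterKind[] = ["letter", "digit"];
const encoder: PhoneticEncoder = "doubleMetaphone";

// Values outside the union are rejected by the compiler:
// const bad: PhoneticEncoder = "metafone"; // error TS2322
```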
+/** Optional parameters. */
+export interface DataSourcesCreateOrUpdateOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Contains response data for the createOrUpdate operation. */
+export type DataSourcesCreateOrUpdateResponse = SearchIndexerDataSource;
+/** Optional parameters. */
+export interface DataSourcesDeleteOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Optional parameters. */
+export interface DataSourcesGetOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the get operation. */
+export type DataSourcesGetResponse = SearchIndexerDataSource;
+/** Optional parameters. */
+export interface DataSourcesListOptionalParams extends coreClient.OperationOptions {
+    /** Selects which top-level properties of the data sources to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
+    select?: string;
+}
+/** Contains response data for the list operation. */
+export type DataSourcesListResponse = ListDataSourcesResult;
+/** Optional parameters. */
+export interface DataSourcesCreateOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the create operation. */
+export type DataSourcesCreateResponse = SearchIndexerDataSource;
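The recurring `ifMatch` / `ifNoneMatch` pairs implement standard HTTP ETag optimistic concurrency: send the ETag from a previous read as If-Match so the write succeeds only if the resource is unchanged, or send `"*"` as If-None-Match to insist on a pure create (the service answers 412 Precondition Failed otherwise). A minimal sketch; the local type below only mirrors the generated interface, which additionally extends `coreClient.OperationOptions`:

```ts
// Mirrors the shape of DataSourcesCreateOrUpdateOptionalParams shown above,
// kept local so the sketch is self-contained.
type ETagOptions = {
  ifMatch?: string;
  ifNoneMatch?: string;
};

const lastSeenETag = '"0x8DC0FFEE"'; // illustrative ETag from a prior get

// Update only if nobody changed the data source since we read it:
const updateIfUnchanged: ETagOptions = { ifMatch: lastSeenETag };

// Create only if the data source does not exist yet:
const createOnly: ETagOptions = { ifNoneMatch: "*" };
```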
+/** Optional parameters. */
+export interface IndexersResetOptionalParams extends coreClient.OperationOptions {
+}
+/** Optional parameters. */
+export interface IndexersRunOptionalParams extends coreClient.OperationOptions {
+}
+/** Optional parameters. */
+export interface IndexersCreateOrUpdateOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Contains response data for the createOrUpdate operation. */
+export type IndexersCreateOrUpdateResponse = SearchIndexer;
+/** Optional parameters. */
+export interface IndexersDeleteOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Optional parameters. */
+export interface IndexersGetOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the get operation. */
+export type IndexersGetResponse = SearchIndexer;
+/** Optional parameters. */
+export interface IndexersListOptionalParams extends coreClient.OperationOptions {
+    /** Selects which top-level properties of the indexers to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
+    select?: string;
+}
+/** Contains response data for the list operation. */
+export type IndexersListResponse = ListIndexersResult;
+/** Optional parameters. */
+export interface IndexersCreateOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the create operation. */
+export type IndexersCreateResponse = SearchIndexer;
+/** Optional parameters. */
+export interface IndexersGetStatusOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the getStatus operation. */
+export type IndexersGetStatusResponse = SearchIndexerStatus;
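`select` on the list operations is a plain comma-separated projection over top-level JSON properties, not an array. A small sketch mirroring `IndexersListOptionalParams` above (property names are the JSON names from the REST API; `"*"`, the default, returns everything):

```ts
// Local alias mirroring the generated list options shown above.
type ListOptions = { select?: string };

const namesOnly: ListOptions = { select: "name" };
const nameAndDescription: ListOptions = { select: "name,description" };
const everything: ListOptions = { select: "*" }; // same as omitting select
```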
+/** Optional parameters. */
+export interface SkillsetsCreateOrUpdateOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Contains response data for the createOrUpdate operation. */
+export type SkillsetsCreateOrUpdateResponse = SearchIndexerSkillset;
+/** Optional parameters. */
+export interface SkillsetsDeleteOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Optional parameters. */
+export interface SkillsetsGetOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the get operation. */
+export type SkillsetsGetResponse = SearchIndexerSkillset;
+/** Optional parameters. */
+export interface SkillsetsListOptionalParams extends coreClient.OperationOptions {
+    /** Selects which top-level properties of the skillsets to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
+    select?: string;
+}
+/** Contains response data for the list operation. */
+export type SkillsetsListResponse = ListSkillsetsResult;
+/** Optional parameters. */
+export interface SkillsetsCreateOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the create operation. */
+export type SkillsetsCreateResponse = SearchIndexerSkillset;
+/** Optional parameters. */
+export interface SynonymMapsCreateOrUpdateOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Contains response data for the createOrUpdate operation. */
+export type SynonymMapsCreateOrUpdateResponse = SynonymMap;
+/** Optional parameters. */
+export interface SynonymMapsDeleteOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Optional parameters. */
+export interface SynonymMapsGetOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the get operation. */
+export type SynonymMapsGetResponse = SynonymMap;
+/** Optional parameters. */
+export interface SynonymMapsListOptionalParams extends coreClient.OperationOptions {
+    /** Selects which top-level properties of the synonym maps to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
+    select?: string;
+}
+/** Contains response data for the list operation. */
+export type SynonymMapsListResponse = ListSynonymMapsResult;
+/** Optional parameters. */
+export interface SynonymMapsCreateOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the create operation. */
+export type SynonymMapsCreateResponse = SynonymMap;
+/** Optional parameters. */
+export interface IndexesCreateOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the create operation. */
+export type IndexesCreateResponse = SearchIndex;
+/** Optional parameters. */
+export interface IndexesListOptionalParams extends coreClient.OperationOptions {
+    /** Selects which top-level properties of the index definitions to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */
+    select?: string;
+}
+/** Contains response data for the list operation. */
+export type IndexesListResponse = ListIndexesResult;
+/** Optional parameters. */
+export interface IndexesCreateOrUpdateOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+    /** Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. */
+    allowIndexDowntime?: boolean;
+}
+/** Contains response data for the createOrUpdate operation. */
+export type IndexesCreateOrUpdateResponse = SearchIndex;
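`allowIndexDowntime` is the opt-in for index updates that add analyzers, tokenizers, token filters, or char filters: per the doc comment above, the service briefly takes the index offline to apply them. A hedged sketch combining it with the ETag check (the local type merely mirrors the generated interface):

```ts
// Mirrors IndexesCreateOrUpdateOptionalParams above, kept local for a
// self-contained example.
type IndexUpdateOptions = {
  ifMatch?: string;
  ifNoneMatch?: string;
  allowIndexDowntime?: boolean;
};

const updateWithNewAnalyzer: IndexUpdateOptions = {
  ifMatch: '"0x8DC0FFEE"',  // illustrative ETag: only update if unchanged
  allowIndexDowntime: true, // accept brief downtime so analyzer changes apply
};
```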
+/** Optional parameters. */
+export interface IndexesDeleteOptionalParams extends coreClient.OperationOptions {
+    /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */
+    ifMatch?: string;
+    /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */
+    ifNoneMatch?: string;
+}
+/** Optional parameters. */
+export interface IndexesGetOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the get operation. */
+export type IndexesGetResponse = SearchIndex;
+/** Optional parameters. */
+export interface IndexesGetStatisticsOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the getStatistics operation. */
+export type IndexesGetStatisticsResponse = GetIndexStatisticsResult;
+/** Optional parameters. */
+export interface IndexesAnalyzeOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the analyze operation. */
+export type IndexesAnalyzeResponse = AnalyzeResult;
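`IndexesAnalyzeResponse = AnalyzeResult` backs analyzer testing: you submit text plus an analyzer (or tokenizer) name and get the produced tokens back. A hedged sketch against the public client; `analyzeText` and its option names (`text`, `analyzerName`) are assumptions about the public wrapper, so check the typings of your installed version:

```ts
import { AzureKeyCredential, SearchIndexClient } from "@azure/search-documents";

async function main(): Promise<void> {
  const client = new SearchIndexClient(
    "https://<service-name>.search.windows.net", // placeholder endpoint
    new AzureKeyCredential("<admin-key>"),       // placeholder admin key
  );

  // Drives the analyze operation whose response type is aliased above.
  const result = await client.analyzeText("hotels-index", {
    text: "The quick brown fox",
    analyzerName: "en.microsoft",
  });

  for (const info of result.tokens) {
    console.log(info.token, info.startOffset, info.endOffset);
  }
}

main().catch(console.error);
```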
+/** Optional parameters. */
+export interface GetServiceStatisticsOptionalParams extends coreClient.OperationOptions {
+}
+/** Contains response data for the getServiceStatistics operation. */
+export type GetServiceStatisticsResponse = ServiceStatistics;
+/** Optional parameters. */
+export interface SearchServiceClientOptionalParams extends coreHttpCompat.ExtendedServiceClientOptions {
+    /** Overrides client endpoint. */
+    endpoint?: string;
+}
+//# sourceMappingURL=index.d.ts.map
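Finally, `SearchServiceClientOptionalParams.endpoint` lets callers override the URL the generated client would otherwise use, e.g. to route traffic through a local proxy while testing. A minimal sketch (local alias; the real interface extends `coreHttpCompat.ExtendedServiceClientOptions`):

```ts
// Mirrors the endpoint override shown above.
type ServiceClientOptions = { endpoint?: string };

const viaLocalProxy: ServiceClientOptions = {
  endpoint: "https://localhost:8080", // hypothetical proxy in front of the service
};
```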