@azure-rest/ai-vision-face 1.0.0-alpha.20250210.1 → 1.0.0-alpha.20250212.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +17 -23
- package/dist/browser/clientDefinitions.d.ts +15 -241
- package/dist/browser/clientDefinitions.d.ts.map +1 -1
- package/dist/browser/clientDefinitions.js.map +1 -1
- package/dist/browser/faceClient.d.ts.map +1 -1
- package/dist/browser/faceClient.js +1 -1
- package/dist/browser/faceClient.js.map +1 -1
- package/dist/browser/isUnexpected.d.ts +3 -29
- package/dist/browser/isUnexpected.d.ts.map +1 -1
- package/dist/browser/isUnexpected.js +7 -31
- package/dist/browser/isUnexpected.js.map +1 -1
- package/dist/browser/models.d.ts +37 -40
- package/dist/browser/models.d.ts.map +1 -1
- package/dist/browser/models.js.map +1 -1
- package/dist/browser/outputModels.d.ts +139 -215
- package/dist/browser/outputModels.d.ts.map +1 -1
- package/dist/browser/outputModels.js.map +1 -1
- package/dist/browser/parameters.d.ts +115 -195
- package/dist/browser/parameters.d.ts.map +1 -1
- package/dist/browser/parameters.js.map +1 -1
- package/dist/browser/pollingHelper.d.ts +1 -8
- package/dist/browser/pollingHelper.d.ts.map +1 -1
- package/dist/browser/pollingHelper.js.map +1 -1
- package/dist/browser/responses.d.ts +9 -434
- package/dist/browser/responses.d.ts.map +1 -1
- package/dist/browser/responses.js.map +1 -1
- package/dist/commonjs/clientDefinitions.d.ts +15 -241
- package/dist/commonjs/clientDefinitions.d.ts.map +1 -1
- package/dist/commonjs/clientDefinitions.js.map +1 -1
- package/dist/commonjs/faceClient.d.ts.map +1 -1
- package/dist/commonjs/faceClient.js +1 -1
- package/dist/commonjs/faceClient.js.map +1 -1
- package/dist/commonjs/isUnexpected.d.ts +3 -29
- package/dist/commonjs/isUnexpected.d.ts.map +1 -1
- package/dist/commonjs/isUnexpected.js +7 -31
- package/dist/commonjs/isUnexpected.js.map +1 -1
- package/dist/commonjs/models.d.ts +37 -40
- package/dist/commonjs/models.d.ts.map +1 -1
- package/dist/commonjs/models.js.map +1 -1
- package/dist/commonjs/outputModels.d.ts +139 -215
- package/dist/commonjs/outputModels.d.ts.map +1 -1
- package/dist/commonjs/outputModels.js.map +1 -1
- package/dist/commonjs/parameters.d.ts +115 -195
- package/dist/commonjs/parameters.d.ts.map +1 -1
- package/dist/commonjs/parameters.js.map +1 -1
- package/dist/commonjs/pollingHelper.d.ts +1 -8
- package/dist/commonjs/pollingHelper.d.ts.map +1 -1
- package/dist/commonjs/pollingHelper.js.map +1 -1
- package/dist/commonjs/responses.d.ts +9 -434
- package/dist/commonjs/responses.d.ts.map +1 -1
- package/dist/commonjs/responses.js.map +1 -1
- package/dist/esm/clientDefinitions.d.ts +15 -241
- package/dist/esm/clientDefinitions.d.ts.map +1 -1
- package/dist/esm/clientDefinitions.js.map +1 -1
- package/dist/esm/faceClient.d.ts.map +1 -1
- package/dist/esm/faceClient.js +1 -1
- package/dist/esm/faceClient.js.map +1 -1
- package/dist/esm/isUnexpected.d.ts +3 -29
- package/dist/esm/isUnexpected.d.ts.map +1 -1
- package/dist/esm/isUnexpected.js +7 -31
- package/dist/esm/isUnexpected.js.map +1 -1
- package/dist/esm/models.d.ts +37 -40
- package/dist/esm/models.d.ts.map +1 -1
- package/dist/esm/models.js.map +1 -1
- package/dist/esm/outputModels.d.ts +139 -215
- package/dist/esm/outputModels.d.ts.map +1 -1
- package/dist/esm/outputModels.js.map +1 -1
- package/dist/esm/parameters.d.ts +115 -195
- package/dist/esm/parameters.d.ts.map +1 -1
- package/dist/esm/parameters.js.map +1 -1
- package/dist/esm/pollingHelper.d.ts +1 -8
- package/dist/esm/pollingHelper.d.ts.map +1 -1
- package/dist/esm/pollingHelper.js.map +1 -1
- package/dist/esm/responses.d.ts +9 -434
- package/dist/esm/responses.d.ts.map +1 -1
- package/dist/esm/responses.js.map +1 -1
- package/dist/react-native/clientDefinitions.d.ts +15 -241
- package/dist/react-native/clientDefinitions.d.ts.map +1 -1
- package/dist/react-native/clientDefinitions.js.map +1 -1
- package/dist/react-native/faceClient.d.ts.map +1 -1
- package/dist/react-native/faceClient.js +1 -1
- package/dist/react-native/faceClient.js.map +1 -1
- package/dist/react-native/isUnexpected.d.ts +3 -29
- package/dist/react-native/isUnexpected.d.ts.map +1 -1
- package/dist/react-native/isUnexpected.js +7 -31
- package/dist/react-native/isUnexpected.js.map +1 -1
- package/dist/react-native/models.d.ts +37 -40
- package/dist/react-native/models.d.ts.map +1 -1
- package/dist/react-native/models.js.map +1 -1
- package/dist/react-native/outputModels.d.ts +139 -215
- package/dist/react-native/outputModels.d.ts.map +1 -1
- package/dist/react-native/outputModels.js.map +1 -1
- package/dist/react-native/parameters.d.ts +115 -195
- package/dist/react-native/parameters.d.ts.map +1 -1
- package/dist/react-native/parameters.js.map +1 -1
- package/dist/react-native/pollingHelper.d.ts +1 -8
- package/dist/react-native/pollingHelper.d.ts.map +1 -1
- package/dist/react-native/pollingHelper.js.map +1 -1
- package/dist/react-native/responses.d.ts +9 -434
- package/dist/react-native/responses.d.ts.map +1 -1
- package/dist/react-native/responses.js.map +1 -1
- package/package.json +1 -1
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"clientDefinitions.js","sourceRoot":"","sources":["../../src/clientDefinitions.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport type {\n GetOperationResultParameters,\n DetectFromUrlParameters,\n DetectParameters,\n DetectFromSessionImageIdParameters,\n FindSimilarParameters,\n FindSimilarFromFaceListParameters,\n FindSimilarFromLargeFaceListParameters,\n IdentifyFromPersonGroupParameters,\n IdentifyFromLargePersonGroupParameters,\n IdentifyFromPersonDirectoryParameters,\n IdentifyFromDynamicPersonGroupParameters,\n VerifyFaceToFaceParameters,\n VerifyFromPersonGroupParameters,\n VerifyFromLargePersonGroupParameters,\n VerifyFromPersonDirectoryParameters,\n GroupParameters,\n CreateFaceListParameters,\n DeleteFaceListParameters,\n GetFaceListParameters,\n UpdateFaceListParameters,\n GetFaceListsParameters,\n AddFaceListFaceFromUrlParameters,\n AddFaceListFaceParameters,\n DeleteFaceListFaceParameters,\n CreateLargeFaceListParameters,\n DeleteLargeFaceListParameters,\n GetLargeFaceListParameters,\n UpdateLargeFaceListParameters,\n GetLargeFaceListsParameters,\n GetLargeFaceListTrainingStatusParameters,\n TrainLargeFaceListParameters,\n AddLargeFaceListFaceFromUrlParameters,\n AddLargeFaceListFaceParameters,\n GetLargeFaceListFacesParameters,\n DeleteLargeFaceListFaceParameters,\n GetLargeFaceListFaceParameters,\n UpdateLargeFaceListFaceParameters,\n CreatePersonGroupParameters,\n DeletePersonGroupParameters,\n GetPersonGroupParameters,\n UpdatePersonGroupParameters,\n GetPersonGroupsParameters,\n GetPersonGroupTrainingStatusParameters,\n TrainPersonGroupParameters,\n CreatePersonGroupPersonParameters,\n GetPersonGroupPersonsParameters,\n DeletePersonGroupPersonParameters,\n GetPersonGroupPersonParameters,\n UpdatePersonGroupPersonParameters,\n AddPersonGroupPersonFaceFromUrlParameters,\n AddPersonGroupPersonFaceParameters,\n 
DeletePersonGroupPersonFaceParameters,\n GetPersonGroupPersonFaceParameters,\n UpdatePersonGroupPersonFaceParameters,\n CreateLargePersonGroupParameters,\n DeleteLargePersonGroupParameters,\n GetLargePersonGroupParameters,\n UpdateLargePersonGroupParameters,\n GetLargePersonGroupsParameters,\n GetLargePersonGroupTrainingStatusParameters,\n TrainLargePersonGroupParameters,\n CreateLargePersonGroupPersonParameters,\n GetLargePersonGroupPersonsParameters,\n DeleteLargePersonGroupPersonParameters,\n GetLargePersonGroupPersonParameters,\n UpdateLargePersonGroupPersonParameters,\n AddLargePersonGroupPersonFaceFromUrlParameters,\n AddLargePersonGroupPersonFaceParameters,\n DeleteLargePersonGroupPersonFaceParameters,\n GetLargePersonGroupPersonFaceParameters,\n UpdateLargePersonGroupPersonFaceParameters,\n CreateLivenessSessionParameters,\n GetLivenessSessionsParameters,\n DeleteLivenessSessionParameters,\n GetLivenessSessionResultParameters,\n GetLivenessSessionAuditEntriesParameters,\n CreateLivenessWithVerifySessionWithVerifyImageParameters,\n CreateLivenessWithVerifySessionParameters,\n GetLivenessWithVerifySessionsParameters,\n DeleteLivenessWithVerifySessionParameters,\n GetLivenessWithVerifySessionResultParameters,\n GetLivenessWithVerifySessionAuditEntriesParameters,\n GetSessionImageParameters,\n CreatePersonParameters,\n GetPersonsParameters,\n DeletePersonParameters,\n GetPersonParameters,\n UpdatePersonParameters,\n GetDynamicPersonGroupReferencesParameters,\n AddPersonFaceParameters,\n AddPersonFaceFromUrlParameters,\n GetPersonFacesParameters,\n DeletePersonFaceParameters,\n GetPersonFaceParameters,\n UpdatePersonFaceParameters,\n CreateDynamicPersonGroupWithPersonParameters,\n CreateDynamicPersonGroupParameters,\n DeleteDynamicPersonGroupParameters,\n GetDynamicPersonGroupParameters,\n UpdateDynamicPersonGroupWithPersonChangesParameters,\n UpdateDynamicPersonGroupParameters,\n GetDynamicPersonGroupsParameters,\n GetDynamicPersonGroupPersonsParameters,\n} 
from \"./parameters.js\";\nimport type {\n GetOperationResult200Response,\n GetOperationResultDefaultResponse,\n DetectFromUrl200Response,\n DetectFromUrlDefaultResponse,\n Detect200Response,\n DetectDefaultResponse,\n DetectFromSessionImageId200Response,\n DetectFromSessionImageIdDefaultResponse,\n FindSimilar200Response,\n FindSimilarDefaultResponse,\n FindSimilarFromFaceList200Response,\n FindSimilarFromFaceListDefaultResponse,\n FindSimilarFromLargeFaceList200Response,\n FindSimilarFromLargeFaceListDefaultResponse,\n IdentifyFromPersonGroup200Response,\n IdentifyFromPersonGroupDefaultResponse,\n IdentifyFromLargePersonGroup200Response,\n IdentifyFromLargePersonGroupDefaultResponse,\n IdentifyFromPersonDirectory200Response,\n IdentifyFromPersonDirectoryDefaultResponse,\n IdentifyFromDynamicPersonGroup200Response,\n IdentifyFromDynamicPersonGroupDefaultResponse,\n VerifyFaceToFace200Response,\n VerifyFaceToFaceDefaultResponse,\n VerifyFromPersonGroup200Response,\n VerifyFromPersonGroupDefaultResponse,\n VerifyFromLargePersonGroup200Response,\n VerifyFromLargePersonGroupDefaultResponse,\n VerifyFromPersonDirectory200Response,\n VerifyFromPersonDirectoryDefaultResponse,\n Group200Response,\n GroupDefaultResponse,\n CreateFaceList200Response,\n CreateFaceListDefaultResponse,\n DeleteFaceList200Response,\n DeleteFaceListDefaultResponse,\n GetFaceList200Response,\n GetFaceListDefaultResponse,\n UpdateFaceList200Response,\n UpdateFaceListDefaultResponse,\n GetFaceLists200Response,\n GetFaceListsDefaultResponse,\n AddFaceListFaceFromUrl200Response,\n AddFaceListFaceFromUrlDefaultResponse,\n AddFaceListFace200Response,\n AddFaceListFaceDefaultResponse,\n DeleteFaceListFace200Response,\n DeleteFaceListFaceDefaultResponse,\n CreateLargeFaceList200Response,\n CreateLargeFaceListDefaultResponse,\n DeleteLargeFaceList200Response,\n DeleteLargeFaceListDefaultResponse,\n GetLargeFaceList200Response,\n GetLargeFaceListDefaultResponse,\n UpdateLargeFaceList200Response,\n 
UpdateLargeFaceListDefaultResponse,\n GetLargeFaceLists200Response,\n GetLargeFaceListsDefaultResponse,\n GetLargeFaceListTrainingStatus200Response,\n GetLargeFaceListTrainingStatusDefaultResponse,\n TrainLargeFaceList202Response,\n TrainLargeFaceListDefaultResponse,\n AddLargeFaceListFaceFromUrl200Response,\n AddLargeFaceListFaceFromUrlDefaultResponse,\n AddLargeFaceListFace200Response,\n AddLargeFaceListFaceDefaultResponse,\n GetLargeFaceListFaces200Response,\n GetLargeFaceListFacesDefaultResponse,\n DeleteLargeFaceListFace200Response,\n DeleteLargeFaceListFaceDefaultResponse,\n GetLargeFaceListFace200Response,\n GetLargeFaceListFaceDefaultResponse,\n UpdateLargeFaceListFace200Response,\n UpdateLargeFaceListFaceDefaultResponse,\n CreatePersonGroup200Response,\n CreatePersonGroupDefaultResponse,\n DeletePersonGroup200Response,\n DeletePersonGroupDefaultResponse,\n GetPersonGroup200Response,\n GetPersonGroupDefaultResponse,\n UpdatePersonGroup200Response,\n UpdatePersonGroupDefaultResponse,\n GetPersonGroups200Response,\n GetPersonGroupsDefaultResponse,\n GetPersonGroupTrainingStatus200Response,\n GetPersonGroupTrainingStatusDefaultResponse,\n TrainPersonGroup202Response,\n TrainPersonGroupDefaultResponse,\n CreatePersonGroupPerson200Response,\n CreatePersonGroupPersonDefaultResponse,\n GetPersonGroupPersons200Response,\n GetPersonGroupPersonsDefaultResponse,\n DeletePersonGroupPerson200Response,\n DeletePersonGroupPersonDefaultResponse,\n GetPersonGroupPerson200Response,\n GetPersonGroupPersonDefaultResponse,\n UpdatePersonGroupPerson200Response,\n UpdatePersonGroupPersonDefaultResponse,\n AddPersonGroupPersonFaceFromUrl200Response,\n AddPersonGroupPersonFaceFromUrlDefaultResponse,\n AddPersonGroupPersonFace200Response,\n AddPersonGroupPersonFaceDefaultResponse,\n DeletePersonGroupPersonFace200Response,\n DeletePersonGroupPersonFaceDefaultResponse,\n GetPersonGroupPersonFace200Response,\n GetPersonGroupPersonFaceDefaultResponse,\n 
UpdatePersonGroupPersonFace200Response,\n UpdatePersonGroupPersonFaceDefaultResponse,\n CreateLargePersonGroup200Response,\n CreateLargePersonGroupDefaultResponse,\n DeleteLargePersonGroup200Response,\n DeleteLargePersonGroupDefaultResponse,\n GetLargePersonGroup200Response,\n GetLargePersonGroupDefaultResponse,\n UpdateLargePersonGroup200Response,\n UpdateLargePersonGroupDefaultResponse,\n GetLargePersonGroups200Response,\n GetLargePersonGroupsDefaultResponse,\n GetLargePersonGroupTrainingStatus200Response,\n GetLargePersonGroupTrainingStatusDefaultResponse,\n TrainLargePersonGroup202Response,\n TrainLargePersonGroupDefaultResponse,\n CreateLargePersonGroupPerson200Response,\n CreateLargePersonGroupPersonDefaultResponse,\n GetLargePersonGroupPersons200Response,\n GetLargePersonGroupPersonsDefaultResponse,\n DeleteLargePersonGroupPerson200Response,\n DeleteLargePersonGroupPersonDefaultResponse,\n GetLargePersonGroupPerson200Response,\n GetLargePersonGroupPersonDefaultResponse,\n UpdateLargePersonGroupPerson200Response,\n UpdateLargePersonGroupPersonDefaultResponse,\n AddLargePersonGroupPersonFaceFromUrl200Response,\n AddLargePersonGroupPersonFaceFromUrlDefaultResponse,\n AddLargePersonGroupPersonFace200Response,\n AddLargePersonGroupPersonFaceDefaultResponse,\n DeleteLargePersonGroupPersonFace200Response,\n DeleteLargePersonGroupPersonFaceDefaultResponse,\n GetLargePersonGroupPersonFace200Response,\n GetLargePersonGroupPersonFaceDefaultResponse,\n UpdateLargePersonGroupPersonFace200Response,\n UpdateLargePersonGroupPersonFaceDefaultResponse,\n CreateLivenessSession200Response,\n CreateLivenessSessionDefaultResponse,\n GetLivenessSessions200Response,\n GetLivenessSessionsDefaultResponse,\n DeleteLivenessSession200Response,\n DeleteLivenessSessionDefaultResponse,\n GetLivenessSessionResult200Response,\n GetLivenessSessionResultDefaultResponse,\n GetLivenessSessionAuditEntries200Response,\n GetLivenessSessionAuditEntriesDefaultResponse,\n 
CreateLivenessWithVerifySessionWithVerifyImage200Response,\n CreateLivenessWithVerifySessionWithVerifyImageDefaultResponse,\n CreateLivenessWithVerifySession200Response,\n CreateLivenessWithVerifySessionDefaultResponse,\n GetLivenessWithVerifySessions200Response,\n GetLivenessWithVerifySessionsDefaultResponse,\n DeleteLivenessWithVerifySession200Response,\n DeleteLivenessWithVerifySessionDefaultResponse,\n GetLivenessWithVerifySessionResult200Response,\n GetLivenessWithVerifySessionResultDefaultResponse,\n GetLivenessWithVerifySessionAuditEntries200Response,\n GetLivenessWithVerifySessionAuditEntriesDefaultResponse,\n GetSessionImage200Response,\n GetSessionImageDefaultResponse,\n CreatePerson202Response,\n CreatePersonDefaultResponse,\n GetPersons200Response,\n GetPersonsDefaultResponse,\n DeletePerson202Response,\n DeletePersonDefaultResponse,\n GetPerson200Response,\n GetPersonDefaultResponse,\n UpdatePerson200Response,\n UpdatePersonDefaultResponse,\n GetDynamicPersonGroupReferences200Response,\n GetDynamicPersonGroupReferencesDefaultResponse,\n AddPersonFace202Response,\n AddPersonFaceDefaultResponse,\n AddPersonFaceFromUrl202Response,\n AddPersonFaceFromUrlDefaultResponse,\n GetPersonFaces200Response,\n GetPersonFacesDefaultResponse,\n DeletePersonFace202Response,\n DeletePersonFaceDefaultResponse,\n GetPersonFace200Response,\n GetPersonFaceDefaultResponse,\n UpdatePersonFace200Response,\n UpdatePersonFaceDefaultResponse,\n CreateDynamicPersonGroupWithPerson202Response,\n CreateDynamicPersonGroupWithPersonDefaultResponse,\n CreateDynamicPersonGroup200Response,\n CreateDynamicPersonGroupDefaultResponse,\n DeleteDynamicPersonGroup202Response,\n DeleteDynamicPersonGroupDefaultResponse,\n GetDynamicPersonGroup200Response,\n GetDynamicPersonGroupDefaultResponse,\n UpdateDynamicPersonGroupWithPersonChanges202Response,\n UpdateDynamicPersonGroupWithPersonChangesDefaultResponse,\n UpdateDynamicPersonGroup200Response,\n UpdateDynamicPersonGroupDefaultResponse,\n 
GetDynamicPersonGroups200Response,\n GetDynamicPersonGroupsDefaultResponse,\n GetDynamicPersonGroupPersons200Response,\n GetDynamicPersonGroupPersonsDefaultResponse,\n} from \"./responses.js\";\nimport type { RecognitionModel } from \"./models.js\";\nimport type { Client, StreamableMethod } from \"@azure-rest/core-client\";\n\nexport interface GetOperationResult {\n /** Get status of a long running operation. */\n get(\n options?: GetOperationResultParameters,\n ): StreamableMethod<GetOperationResult200Response | GetOperationResultDefaultResponse>;\n}\n\nexport interface DetectFromUrl {\n /**\n * > [!IMPORTANT]\n * > Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. The limited capabilities are age, smile, facial hair, hair and makeup. Email [Azure Face API](mailto:azureface@microsoft.com) if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision [here](https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/).\n *\n * *\n * * No image will be stored. Only the extracted face feature(s) will be stored on server. The faceId is an identifier of the face feature and will be used in \"Identify\", \"Verify\", and \"Find Similar\". The stored face features will expire and be deleted at the time specified by faceIdTimeToLive after the original detection call.\n * * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some of the results returned for specific attributes may not be highly accurate.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. 
The allowed image file size is from 1KB to 6MB.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n * * For optimal results when querying \"Identify\", \"Verify\", and \"Find Similar\" ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n * * Different 'detectionModel' values can be provided. The availability of landmarks and supported attributes depends on the detection model specified. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Different 'recognitionModel' values are provided. If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model).\n */\n post(\n options: DetectFromUrlParameters,\n ): StreamableMethod<DetectFromUrl200Response | DetectFromUrlDefaultResponse>;\n /**\n * > [!IMPORTANT]\n * > Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. 
The limited capabilities are age, smile, facial hair, hair and makeup. Email [Azure Face API](mailto:azureface@microsoft.com) if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision [here](https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/).\n *\n * *\n * * No image will be stored. Only the extracted face feature(s) will be stored on server. The faceId is an identifier of the face feature and will be used in \"Identify\", \"Verify\", and \"Find Similar\". The stored face features will expire and be deleted at the time specified by faceIdTimeToLive after the original detection call.\n * * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some of the results returned for specific attributes may not be highly accurate.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n * * For optimal results when querying \"Identify\", \"Verify\", and \"Find Similar\" ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n * * Different 'detectionModel' values can be provided. The availability of landmarks and supported attributes depends on the detection model specified. 
To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Different 'recognitionModel' values are provided. If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model).\n */\n post(options: DetectParameters): StreamableMethod<Detect200Response | DetectDefaultResponse>;\n /**\n * > [!IMPORTANT]\n * > Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. The limited capabilities are age, smile, facial hair, hair and makeup. Email [Azure Face API](mailto:azureface@microsoft.com) if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision [here](https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/).\n *\n * *\n * * No image will be stored. Only the extracted face feature(s) will be stored on server. The faceId is an identifier of the face feature and will be used in \"Identify\", \"Verify\", and \"Find Similar\". The stored face features will expire and be deleted at the time specified by faceIdTimeToLive after the original detection call.\n * * Optional parameters include faceId, landmarks, and attributes. 
Attributes include headPose, glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some of the results returned for specific attributes may not be highly accurate.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n * * For optimal results when querying \"Identify\", \"Verify\", and \"Find Similar\" ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n * * Different 'detectionModel' values can be provided. The availability of landmarks and supported attributes depends on the detection model specified. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Different 'recognitionModel' values are provided. If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. 
More details, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model).\n */\n post(\n options: DetectFromSessionImageIdParameters,\n ): StreamableMethod<\n DetectFromSessionImageId200Response | DetectFromSessionImageIdDefaultResponse\n >;\n}\n\nexport interface FindSimilar {\n /**\n * Depending on the input the returned similar faces list contains faceIds or persistedFaceIds ranked by similarity.\n *\n * Find similar has two working modes, \"matchPerson\" and \"matchFace\". \"matchPerson\" is the default mode that it tries to find faces of the same person as possible by using internal same-person thresholds. It is useful to find a known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds. \"matchFace\" mode ignores same-person thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used in the cases like searching celebrity-looking faces.\n *\n * The 'recognitionModel' associated with the query faceId should be the same as the 'recognitionModel' used by the target faceId array.\n */\n post(\n options: FindSimilarParameters,\n ): StreamableMethod<FindSimilar200Response | FindSimilarDefaultResponse>;\n /**\n * Depending on the input the returned similar faces list contains faceIds or persistedFaceIds ranked by similarity.\n *\n * Find similar has two working modes, \"matchPerson\" and \"matchFace\". \"matchPerson\" is the default mode that it tries to find faces of the same person as possible by using internal same-person thresholds. It is useful to find a known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds. \"matchFace\" mode ignores same-person thresholds and returns ranked similar faces anyway, even the similarity is low. 
It can be used in the cases like searching celebrity-looking faces.\n *\n * The 'recognitionModel' associated with the query faceId should be the same as the 'recognitionModel' used by the target Face List.\n */\n post(\n options: FindSimilarFromFaceListParameters,\n ): StreamableMethod<FindSimilarFromFaceList200Response | FindSimilarFromFaceListDefaultResponse>;\n /**\n * Depending on the input the returned similar faces list contains faceIds or persistedFaceIds ranked by similarity.\n *\n * Find similar has two working modes, \"matchPerson\" and \"matchFace\". \"matchPerson\" is the default mode that it tries to find faces of the same person as possible by using internal same-person thresholds. It is useful to find a known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds. \"matchFace\" mode ignores same-person thresholds and returns ranked similar faces anyway, even the similarity is low. It can be used in the cases like searching celebrity-looking faces.\n *\n * The 'recognitionModel' associated with the query faceId should be the same as the 'recognitionModel' used by the target Large Face List.\n */\n post(\n options: FindSimilarFromLargeFaceListParameters,\n ): StreamableMethod<\n FindSimilarFromLargeFaceList200Response | FindSimilarFromLargeFaceListDefaultResponse\n >;\n}\n\nexport interface IdentifyFromPersonGroup {\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Person Group (given by personGroupId), and return candidate person(s) for that face ranked by similarity confidence. The Person Group should be trained to make it ready for identification. 
See more in \"Train Person Group\".\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently at the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. If no person is identified, the returned candidates will be an empty array.\n * > * Try \"Find Similar\" when you need to find similar faces from a Face List/Large Face List instead of a Person Group.\n * > * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used by the target Person Group.\n */\n post(\n options: IdentifyFromPersonGroupParameters,\n ): StreamableMethod<IdentifyFromPersonGroup200Response | IdentifyFromPersonGroupDefaultResponse>;\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Large Person Group (given by largePersonGroupId), and return candidate person(s) for that face ranked by similarity confidence. The Large Person Group should be trained to make it ready for identification. See more in \"Train Large Person Group\".\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently at the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. 
If no person is identified, the returned candidates will be an empty array.\n * > * Try \"Find Similar\" when you need to find similar faces from a Face List/Large Face List instead of a Person Group/Large Person Group.\n * > * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used by the target Person Group or Large Person Group.\n */\n post(\n options: IdentifyFromLargePersonGroupParameters,\n ): StreamableMethod<\n IdentifyFromLargePersonGroup200Response | IdentifyFromLargePersonGroupDefaultResponse\n >;\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Person Directory Persons (given by personIds), and return candidate person(s) for that face ranked by similarity confidence.\n * Passing personIds with an array with one element \"*\" can perform the operation over entire person directory.\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently at the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. 
If no person is identified, the returned candidates will be an empty array.\n * > * The Identify operation can only match faces obtained with the same recognition model, that is associated with the query faces.\n */\n post(\n options: IdentifyFromPersonDirectoryParameters,\n ): StreamableMethod<\n IdentifyFromPersonDirectory200Response | IdentifyFromPersonDirectoryDefaultResponse\n >;\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return candidate person(s) for that face ranked by similarity confidence.\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently at the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. If no person is identified, the returned candidates will be an empty array.\n * > * The Identify operation can only match faces obtained with the same recognition model, that is associated with the query faces.\n */\n post(\n options: IdentifyFromDynamicPersonGroupParameters,\n ): StreamableMethod<\n IdentifyFromDynamicPersonGroup200Response | IdentifyFromDynamicPersonGroupDefaultResponse\n >;\n}\n\nexport interface VerifyFaceToFace {\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. 
Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy please make your own judgment.\n * > * The 'recognitionModel' associated with the both faces should be the same.\n */\n post(\n options: VerifyFaceToFaceParameters,\n ): StreamableMethod<VerifyFaceToFace200Response | VerifyFaceToFaceDefaultResponse>;\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy please make your own judgment.\n * > * The 'recognitionModel' associated with the query face should be the same as the 'recognitionModel' used by the Person Group.\n */\n post(\n options: VerifyFromPersonGroupParameters,\n ): StreamableMethod<VerifyFromPersonGroup200Response | VerifyFromPersonGroupDefaultResponse>;\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy please make your own judgment.\n * > * The 'recognitionModel' associated with the query face should be the same as the 'recognitionModel' used by the Large Person Group.\n */\n post(\n options: VerifyFromLargePersonGroupParameters,\n ): StreamableMethod<\n VerifyFromLargePersonGroup200Response | VerifyFromLargePersonGroupDefaultResponse\n >;\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. 
Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy please make your own judgment.\n * > * The Verify operation can only match faces obtained with the same recognition model, that is associated with the query face.\n */\n post(\n options: VerifyFromPersonDirectoryParameters,\n ): StreamableMethod<\n VerifyFromPersonDirectory200Response | VerifyFromPersonDirectoryDefaultResponse\n >;\n}\n\nexport interface Group {\n /**\n * >\n * *\n * * The output is one or more disjointed face groups and a messyGroup. A face group contains faces that have similar looking, often of the same person. Face groups are ranked by group size, i.e. number of faces. Notice that faces belonging to a same person might be split into several groups in the result.\n * * MessyGroup is a special face group containing faces that cannot find any similar counterpart face from original faces. The messyGroup will not appear in the result if all faces found their counterparts.\n * * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try \"Verify Face To Face\" when you only have 2 candidate faces.\n * * The 'recognitionModel' associated with the query faces' faceIds should be the same.\n */\n post(options: GroupParameters): StreamableMethod<Group200Response | GroupDefaultResponse>;\n}\n\nexport interface CreateFaceList {\n /**\n * Up to 64 Face Lists are allowed in one subscription.\n *\n * Face List is a list of faces, up to 1,000 faces, and used by \"Find Similar From Face List\".\n *\n * After creation, user should use \"Add Face List Face\" to import the faces. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Face List\" is called.\n *\n * \"Find Similar\" is used for scenario like finding celebrity-like faces, similar face filtering, or as a light way face identification. 
But if the actual use is to identify person, please use Person Group / Large Person Group and \"Identify\".\n *\n * Please consider Large Face List when the face number is large. It can support up to 1,000,000 faces.\n */\n put(\n options: CreateFaceListParameters,\n ): StreamableMethod<CreateFaceList200Response | CreateFaceListDefaultResponse>;\n /** Delete a specified Face List. */\n delete(\n options?: DeleteFaceListParameters,\n ): StreamableMethod<DeleteFaceList200Response | DeleteFaceListDefaultResponse>;\n /** Retrieve a Face List's faceListId, name, userData, recognitionModel and faces in the Face List. */\n get(\n options?: GetFaceListParameters,\n ): StreamableMethod<GetFaceList200Response | GetFaceListDefaultResponse>;\n /** Update information of a Face List, including name and userData. */\n patch(\n options: UpdateFaceListParameters,\n ): StreamableMethod<UpdateFaceList200Response | UpdateFaceListDefaultResponse>;\n}\n\nexport interface GetFaceLists {\n /**\n * List Face Lists' faceListId, name, userData and recognitionModel.\n *\n * To get face information inside Face List use \"Get Face List\".\n */\n get(\n options?: GetFaceListsParameters,\n ): StreamableMethod<GetFaceLists200Response | GetFaceListsDefaultResponse>;\n}\n\nexport interface AddFaceListFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Face List Face\" or \"Delete Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. 
The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddFaceListFaceFromUrlParameters,\n ): StreamableMethod<AddFaceListFaceFromUrl200Response | AddFaceListFaceFromUrlDefaultResponse>;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Face List Face\" or \"Delete Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. 
If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddFaceListFaceParameters,\n ): StreamableMethod<AddFaceListFace200Response | AddFaceListFaceDefaultResponse>;\n}\n\nexport interface DeleteFaceListFace {\n /** Adding/deleting faces to/from a same Face List are processed sequentially and to/from different Face Lists are in parallel. */\n delete(\n options?: DeleteFaceListFaceParameters,\n ): StreamableMethod<DeleteFaceListFace200Response | DeleteFaceListFaceDefaultResponse>;\n}\n\nexport interface CreateLargeFaceList {\n /**\n * Large Face List is a list of faces, up to 1,000,000 faces, and used by \"Find Similar From Large Face List\".\n *\n * After creation, user should use Add Large Face List Face to import the faces and Train Large Face List to make it ready for \"Find Similar\". No image will be stored. Only the extracted face feature(s) will be stored on server until Delete Large Face List is called.\n *\n * \"Find Similar\" is used for scenario like finding celebrity-like faces, similar face filtering, or as a light way face identification. 
But if the actual use is to identify person, please use Person Group / Large Person Group and \"Identify\".\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 64 Large Face Lists.\n * > * S0-tier subscription quota: 1,000,000 Large Face Lists.\n */\n put(\n options: CreateLargeFaceListParameters,\n ): StreamableMethod<CreateLargeFaceList200Response | CreateLargeFaceListDefaultResponse>;\n /** Adding/deleting faces to/from a same Large Face List are processed sequentially and to/from different Large Face Lists are in parallel. */\n delete(\n options?: DeleteLargeFaceListParameters,\n ): StreamableMethod<DeleteLargeFaceList200Response | DeleteLargeFaceListDefaultResponse>;\n /** Retrieve a Large Face List's largeFaceListId, name, userData and recognitionModel. */\n get(\n options?: GetLargeFaceListParameters,\n ): StreamableMethod<GetLargeFaceList200Response | GetLargeFaceListDefaultResponse>;\n /** Update information of a Large Face List, including name and userData. */\n patch(\n options: UpdateLargeFaceListParameters,\n ): StreamableMethod<UpdateLargeFaceList200Response | UpdateLargeFaceListDefaultResponse>;\n}\n\nexport interface GetLargeFaceLists {\n /**\n * To get face information inside largeFaceList use \"Get Large Face List Face\".\n *\n * Large Face Lists are stored in alphabetical order of largeFaceListId.\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargeFaceListsParameters,\n ): StreamableMethod<GetLargeFaceLists200Response | GetLargeFaceListsDefaultResponse>;\n}\n\nexport interface GetLargeFaceListTrainingStatus {\n /**\n * To check the Large Face List training status completed or still ongoing. Large Face List training is an asynchronous operation triggered by \"Train Large Face List\".\n *\n * Training time depends on the number of face entries in a Large Face List. It could be in seconds, or up to half an hour for 1,000,000 faces.\n */\n get(\n options?: GetLargeFaceListTrainingStatusParameters,\n ): StreamableMethod<\n GetLargeFaceListTrainingStatus200Response | GetLargeFaceListTrainingStatusDefaultResponse\n >;\n}\n\nexport interface TrainLargeFaceList {\n /**\n * Training is a crucial step that only a trained Large Face List can be used by \"Find Similar From Large Face List\".\n *\n * The training task is an asynchronous task. Training time depends on the number of face entries in a Large Face List. It could be in seconds, or up to half an hour for 1,000,000 faces. To check training completion, please use \"Get Large Face List Training Status\".\n */\n post(\n options?: TrainLargeFaceListParameters,\n ): StreamableMethod<TrainLargeFaceList202Response | TrainLargeFaceListDefaultResponse>;\n}\n\nexport interface AddLargeFaceListFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. 
No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Face List Face\" or \"Delete Large Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 faces per Large Face List.\n * > * S0-tier subscription quota: 1,000,000 faces per Large Face List.\n */\n post(\n options: AddLargeFaceListFaceFromUrlParameters,\n ): StreamableMethod<\n AddLargeFaceListFaceFromUrl200Response | AddLargeFaceListFaceFromUrlDefaultResponse\n >;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. 
Only the extracted face feature(s) will be stored on server until \"Delete Large Face List Face\" or \"Delete Large Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 faces per Large Face List.\n * > * S0-tier subscription quota: 1,000,000 faces per Large Face List.\n */\n post(\n options: AddLargeFaceListFaceParameters,\n ): StreamableMethod<AddLargeFaceListFace200Response | AddLargeFaceListFaceDefaultResponse>;\n /**\n * Faces are stored in alphabetical order of persistedFaceId created in \"Add Large Face List Face\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. 
Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargeFaceListFacesParameters,\n ): StreamableMethod<GetLargeFaceListFaces200Response | GetLargeFaceListFacesDefaultResponse>;\n}\n\nexport interface DeleteLargeFaceListFace {\n /** Delete a face from a Large Face List by specified largeFaceListId and persistedFaceId. */\n delete(\n options?: DeleteLargeFaceListFaceParameters,\n ): StreamableMethod<DeleteLargeFaceListFace200Response | DeleteLargeFaceListFaceDefaultResponse>;\n /** Retrieve persisted face in Large Face List by largeFaceListId and persistedFaceId. */\n get(\n options?: GetLargeFaceListFaceParameters,\n ): StreamableMethod<GetLargeFaceListFace200Response | GetLargeFaceListFaceDefaultResponse>;\n /** Update a specified face's userData field in a Large Face List by its persistedFaceId. 
*/\n patch(\n options: UpdateLargeFaceListFaceParameters,\n ): StreamableMethod<UpdateLargeFaceListFace200Response | UpdateLargeFaceListFaceDefaultResponse>;\n}\n\nexport interface CreatePersonGroup {\n /**\n * A Person Group is a container holding the uploaded person data, including face recognition features.\n *\n * After creation, use \"Create Person Group Person\" to add persons into the group, and then call \"Train Person Group\" to get this group ready for \"Identify From Person Group\".\n *\n * No image will be stored. Only the person's extracted face feature(s) and userData will be stored on server until \"Delete Person Group Person\" or \"Delete Person Group\" is called.\n *\n * 'recognitionModel' should be specified to associate with this Person Group. The default value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly specify the model you need in this parameter. New faces that are added to an existing Person Group will use the recognition model that's already associated with the collection. Existing face feature(s) in a Person Group can't be updated to features extracted by another version of recognition model.\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons.\n * > * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons.\n * > * to handle larger scale face identification problem, please consider using Large Person Group.\n */\n put(\n options: CreatePersonGroupParameters,\n ): StreamableMethod<CreatePersonGroup200Response | CreatePersonGroupDefaultResponse>;\n /** Delete an existing Person Group with specified personGroupId. Persisted data in this Person Group will be deleted. */\n delete(\n options?: DeletePersonGroupParameters,\n ): StreamableMethod<DeletePersonGroup200Response | DeletePersonGroupDefaultResponse>;\n /** Retrieve Person Group name, userData and recognitionModel. 
To get person information under this personGroup, use \"Get Person Group Persons\". */\n get(\n options?: GetPersonGroupParameters,\n ): StreamableMethod<GetPersonGroup200Response | GetPersonGroupDefaultResponse>;\n /** Update an existing Person Group's name and userData. The properties keep unchanged if they are not in request body. */\n patch(\n options: UpdatePersonGroupParameters,\n ): StreamableMethod<UpdatePersonGroup200Response | UpdatePersonGroupDefaultResponse>;\n}\n\nexport interface GetPersonGroups {\n /**\n * Person Groups are stored in alphabetical order of personGroupId.\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetPersonGroupsParameters,\n ): StreamableMethod<GetPersonGroups200Response | GetPersonGroupsDefaultResponse>;\n}\n\nexport interface GetPersonGroupTrainingStatus {\n /** To check Person Group training status completed or still ongoing. Person Group training is an asynchronous operation triggered by \"Train Person Group\" API. 
*/\n get(\n options?: GetPersonGroupTrainingStatusParameters,\n ): StreamableMethod<\n GetPersonGroupTrainingStatus200Response | GetPersonGroupTrainingStatusDefaultResponse\n >;\n}\n\nexport interface TrainPersonGroup {\n /** The training task is an asynchronous task. Training time depends on the number of person entries, and their faces in a Person Group. It could be several seconds to minutes. To check training status, please use \"Get Person Group Training Status\". */\n post(\n options?: TrainPersonGroupParameters,\n ): StreamableMethod<TrainPersonGroup202Response | TrainPersonGroupDefaultResponse>;\n}\n\nexport interface CreatePersonGroupPerson {\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota:\n * > * 1,000 persons in all Person Groups.\n * > * S0-tier subscription quota:\n * > * 10,000 persons per Person Group.\n * > * 1,000,000 Person Groups.\n * > * 100,000,000 persons in all Person Groups.\n */\n post(\n options: CreatePersonGroupPersonParameters,\n ): StreamableMethod<CreatePersonGroupPerson200Response | CreatePersonGroupPersonDefaultResponse>;\n /**\n * Persons are stored in alphabetical order of personId created in \"Create Person Group Person\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetPersonGroupPersonsParameters,\n ): StreamableMethod<GetPersonGroupPersons200Response | GetPersonGroupPersonsDefaultResponse>;\n}\n\nexport interface DeletePersonGroupPerson {\n /** Delete an existing person from a Person Group. The persistedFaceId, userData, person name and face feature(s) in the person entry will all be deleted. */\n delete(\n options?: DeletePersonGroupPersonParameters,\n ): StreamableMethod<DeletePersonGroupPerson200Response | DeletePersonGroupPersonDefaultResponse>;\n /** Retrieve a person's name and userData, and the persisted faceIds representing the registered person face feature(s). */\n get(\n options?: GetPersonGroupPersonParameters,\n ): StreamableMethod<GetPersonGroupPerson200Response | GetPersonGroupPersonDefaultResponse>;\n /** Update name or userData of a person. */\n patch(\n options: UpdatePersonGroupPersonParameters,\n ): StreamableMethod<UpdatePersonGroupPerson200Response | UpdatePersonGroupPersonDefaultResponse>;\n}\n\nexport interface AddPersonGroupPersonFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. 
Only the extracted face feature(s) will be stored on server until \"Delete Person Group Person Face\", \"Delete Person Group Person\" or \"Delete Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddPersonGroupPersonFaceFromUrlParameters,\n ): StreamableMethod<\n AddPersonGroupPersonFaceFromUrl200Response | AddPersonGroupPersonFaceFromUrlDefaultResponse\n >;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. 
Only the extracted face feature(s) will be stored on server until \"Delete Person Group Person Face\", \"Delete Person Group Person\" or \"Delete Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddPersonGroupPersonFaceParameters,\n ): StreamableMethod<\n AddPersonGroupPersonFace200Response | AddPersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface DeletePersonGroupPersonFace {\n /** Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel. 
*/\n delete(\n options?: DeletePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n DeletePersonGroupPersonFace200Response | DeletePersonGroupPersonFaceDefaultResponse\n >;\n /** Retrieve person face information. The persisted person face is specified by its personGroupId, personId and persistedFaceId. */\n get(\n options?: GetPersonGroupPersonFaceParameters,\n ): StreamableMethod<\n GetPersonGroupPersonFace200Response | GetPersonGroupPersonFaceDefaultResponse\n >;\n /** Update a person persisted face's userData field. */\n patch(\n options: UpdatePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n UpdatePersonGroupPersonFace200Response | UpdatePersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface CreateLargePersonGroup {\n /**\n * A Large Person Group is a container holding the uploaded person data, including the face recognition features. It can hold up to 1,000,000 entities.\n *\n * After creation, use \"Create Large Person Group Person\" to add person into the group, and call \"Train Large Person Group\" to get this group ready for \"Identify From Large Person Group\".\n *\n * No image will be stored. Only the person's extracted face feature(s) and userData will be stored on server until \"Delete Large Person Group Person\" or \"Delete Large Person Group\" is called.\n *\n * 'recognitionModel' should be specified to associate with this Large Person Group. The default value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly specify the model you need in this parameter. New faces that are added to an existing Large Person Group will use the recognition model that's already associated with the collection. 
Existing face feature(s) in a Large Person Group can't be updated to features extracted by another version of recognition model.\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 Large Person Groups.\n * > * S0-tier subscription quota: 1,000,000 Large Person Groups.\n */\n put(\n options: CreateLargePersonGroupParameters,\n ): StreamableMethod<CreateLargePersonGroup200Response | CreateLargePersonGroupDefaultResponse>;\n /** Delete an existing Large Person Group with specified personGroupId. Persisted data in this Large Person Group will be deleted. */\n delete(\n options?: DeleteLargePersonGroupParameters,\n ): StreamableMethod<DeleteLargePersonGroup200Response | DeleteLargePersonGroupDefaultResponse>;\n /** Retrieve the information of a Large Person Group, including its name, userData and recognitionModel. This API returns Large Person Group information only, use \"Get Large Person Group Persons\" instead to retrieve person information under the Large Person Group. */\n get(\n options?: GetLargePersonGroupParameters,\n ): StreamableMethod<GetLargePersonGroup200Response | GetLargePersonGroupDefaultResponse>;\n /** Update an existing Large Person Group's name and userData. The properties keep unchanged if they are not in request body. */\n patch(\n options: UpdateLargePersonGroupParameters,\n ): StreamableMethod<UpdateLargePersonGroup200Response | UpdateLargePersonGroupDefaultResponse>;\n}\n\nexport interface GetLargePersonGroups {\n /**\n * Large Person Groups are stored in alphabetical order of largePersonGroupId.\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargePersonGroupsParameters,\n ): StreamableMethod<GetLargePersonGroups200Response | GetLargePersonGroupsDefaultResponse>;\n}\n\nexport interface GetLargePersonGroupTrainingStatus {\n /** Training time depends on the number of person entries, and their faces in a Large Person Group. It could be in seconds, or up to half an hour for 1,000,000 persons. */\n get(\n options?: GetLargePersonGroupTrainingStatusParameters,\n ): StreamableMethod<\n GetLargePersonGroupTrainingStatus200Response | GetLargePersonGroupTrainingStatusDefaultResponse\n >;\n}\n\nexport interface TrainLargePersonGroup {\n /** The training task is an asynchronous task. Training time depends on the number of person entries, and their faces in a Large Person Group. It could be in several seconds, or up to half a hour for 1,000,000 persons. To check training status, please use \"Get Large Person Group Training Status\". 
*/\n post(\n options?: TrainLargePersonGroupParameters,\n ): StreamableMethod<TrainLargePersonGroup202Response | TrainLargePersonGroupDefaultResponse>;\n}\n\nexport interface CreateLargePersonGroupPerson {\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota:\n * > * 1,000 persons in all Large Person Groups.\n * > * S0-tier subscription quota:\n * > * 1,000,000 persons per Large Person Group.\n * > * 1,000,000 Large Person Groups.\n * > * 1,000,000,000 persons in all Large Person Groups.\n */\n post(\n options: CreateLargePersonGroupPersonParameters,\n ): StreamableMethod<\n CreateLargePersonGroupPerson200Response | CreateLargePersonGroupPersonDefaultResponse\n >;\n /**\n * Persons are stored in alphabetical order of personId created in \"Create Large Person Group Person\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargePersonGroupPersonsParameters,\n ): StreamableMethod<\n GetLargePersonGroupPersons200Response | GetLargePersonGroupPersonsDefaultResponse\n >;\n}\n\nexport interface DeleteLargePersonGroupPerson {\n /** Delete an existing person from a Large Person Group. 
The persistedFaceId, userData, person name and face feature(s) in the person entry will all be deleted. */\n delete(\n options?: DeleteLargePersonGroupPersonParameters,\n ): StreamableMethod<\n DeleteLargePersonGroupPerson200Response | DeleteLargePersonGroupPersonDefaultResponse\n >;\n /** Retrieve a person's name and userData, and the persisted faceIds representing the registered person face feature(s). */\n get(\n options?: GetLargePersonGroupPersonParameters,\n ): StreamableMethod<\n GetLargePersonGroupPerson200Response | GetLargePersonGroupPersonDefaultResponse\n >;\n /** Update name or userData of a person. */\n patch(\n options: UpdateLargePersonGroupPersonParameters,\n ): StreamableMethod<\n UpdateLargePersonGroupPerson200Response | UpdateLargePersonGroupPersonDefaultResponse\n >;\n}\n\nexport interface AddLargePersonGroupPersonFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Person Group Person Face\", \"Delete Large Person Group Person\" or \"Delete Large Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. 
If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddLargePersonGroupPersonFaceFromUrlParameters,\n ): StreamableMethod<\n | AddLargePersonGroupPersonFaceFromUrl200Response\n | AddLargePersonGroupPersonFaceFromUrlDefaultResponse\n >;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Person Group Person Face\", \"Delete Large Person Group Person\" or \"Delete Large Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. 
If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n AddLargePersonGroupPersonFace200Response | AddLargePersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface DeleteLargePersonGroupPersonFace {\n /** Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel. */\n delete(\n options?: DeleteLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n DeleteLargePersonGroupPersonFace200Response | DeleteLargePersonGroupPersonFaceDefaultResponse\n >;\n /** Retrieve person face information. The persisted person face is specified by its largePersonGroupId, personId and persistedFaceId. */\n get(\n options?: GetLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n GetLargePersonGroupPersonFace200Response | GetLargePersonGroupPersonFaceDefaultResponse\n >;\n /** Update a person persisted face's userData field. 
*/\n patch(\n options: UpdateLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n UpdateLargePersonGroupPersonFace200Response | UpdateLargePersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface CreateLivenessSession {\n /**\n * A session is best for client device scenarios where developers want to authorize a client device to perform only a liveness detection without granting full access to their resource. Created sessions have a limited life span and only authorize clients to perform the desired action before access is expired.\n *\n * Permissions includes...\n * >\n * *\n * * Ability to call /detectLiveness/singleModal for up to 3 retries.\n * * A token lifetime of 10 minutes.\n *\n * > [!NOTE]\n * > Client access can be revoked by deleting the session using the Delete Liveness Session operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests that a client has made to your resource, use the List Liveness Session Audit Entries.\n */\n post(\n options: CreateLivenessSessionParameters,\n ): StreamableMethod<CreateLivenessSession200Response | CreateLivenessSessionDefaultResponse>;\n /**\n * List sessions from the last sessionId greater than the 'start'.\n *\n * The result should be ordered by sessionId in ascending order.\n */\n get(\n options?: GetLivenessSessionsParameters,\n ): StreamableMethod<GetLivenessSessions200Response | GetLivenessSessionsDefaultResponse>;\n}\n\nexport interface DeleteLivenessSession {\n /**\n * > [!NOTE]\n * > Deleting a session deactivates the Session Auth Token by blocking future API calls made with that Auth Token. While this can be used to remove any access for that token, those requests will still count towards overall resource rate limits. 
It's best to leverage TokenTTL to limit length of tokens in the case that it is misused.\n */\n delete(\n options?: DeleteLivenessSessionParameters,\n ): StreamableMethod<DeleteLivenessSession200Response | DeleteLivenessSessionDefaultResponse>;\n /** Get session result of detectLiveness/singleModal call. */\n get(\n options?: GetLivenessSessionResultParameters,\n ): StreamableMethod<\n GetLivenessSessionResult200Response | GetLivenessSessionResultDefaultResponse\n >;\n}\n\nexport interface GetLivenessSessionAuditEntries {\n /** Gets session requests and response body for the session. */\n get(\n options?: GetLivenessSessionAuditEntriesParameters,\n ): StreamableMethod<\n GetLivenessSessionAuditEntries200Response | GetLivenessSessionAuditEntriesDefaultResponse\n >;\n}\n\nexport interface CreateLivenessWithVerifySessionWithVerifyImage {\n /**\n * A session is best for client device scenarios where developers want to authorize a client device to perform only a liveness detection without granting full access to their resource. 
Created sessions have a limited life span and only authorize clients to perform the desired action before access is expired.\n *\n * Permissions includes...\n * >\n * *\n * * Ability to call /detectLivenessWithVerify/singleModal for up to 3 retries.\n * * A token lifetime of 10 minutes.\n *\n * > [!NOTE]\n * >\n * > *\n * > * Client access can be revoked by deleting the session using the Delete Liveness With Verify Session operation.\n * > * To retrieve a result, use the Get Liveness With Verify Session.\n * > * To audit the individual requests that a client has made to your resource, use the List Liveness With Verify Session Audit Entries.\n *\n * Recommended Option: VerifyImage is provided during session creation.\n */\n post(\n options: CreateLivenessWithVerifySessionWithVerifyImageParameters,\n ): StreamableMethod<\n | CreateLivenessWithVerifySessionWithVerifyImage200Response\n | CreateLivenessWithVerifySessionWithVerifyImageDefaultResponse\n >;\n /**\n * A session is best for client device scenarios where developers want to authorize a client device to perform only a liveness detection without granting full access to their resource. 
Created sessions have a limited life span and only authorize clients to perform the desired action before access is expired.\n *\n * Permissions includes...\n * >\n * *\n * * Ability to call /detectLivenessWithVerify/singleModal for up to 3 retries.\n * * A token lifetime of 10 minutes.\n *\n * > [!NOTE]\n * >\n * > *\n * > * Client access can be revoked by deleting the session using the Delete Liveness With Verify Session operation.\n * > * To retrieve a result, use the Get Liveness With Verify Session.\n * > * To audit the individual requests that a client has made to your resource, use the List Liveness With Verify Session Audit Entries.\n *\n * Alternative Option: Client device submits VerifyImage during the /detectLivenessWithVerify/singleModal call.\n * > [!NOTE]\n * > Extra measures should be taken to validate that the client is sending the expected VerifyImage.\n */\n post(\n options: CreateLivenessWithVerifySessionParameters,\n ): StreamableMethod<\n CreateLivenessWithVerifySession200Response | CreateLivenessWithVerifySessionDefaultResponse\n >;\n /**\n * List sessions from the last sessionId greater than the \"start\".\n *\n * The result should be ordered by sessionId in ascending order.\n */\n get(\n options?: GetLivenessWithVerifySessionsParameters,\n ): StreamableMethod<\n GetLivenessWithVerifySessions200Response | GetLivenessWithVerifySessionsDefaultResponse\n >;\n}\n\nexport interface DeleteLivenessWithVerifySession {\n /**\n * > [!NOTE]\n * > Deleting a session deactivates the Session Auth Token by blocking future API calls made with that Auth Token. While this can be used to remove any access for that token, those requests will still count towards overall resource rate limits. 
It's best to leverage TokenTTL to limit length of tokens in the case that it is misused.\n */\n delete(\n options?: DeleteLivenessWithVerifySessionParameters,\n ): StreamableMethod<\n DeleteLivenessWithVerifySession200Response | DeleteLivenessWithVerifySessionDefaultResponse\n >;\n /** Get session result of detectLivenessWithVerify/singleModal call. */\n get(\n options?: GetLivenessWithVerifySessionResultParameters,\n ): StreamableMethod<\n | GetLivenessWithVerifySessionResult200Response\n | GetLivenessWithVerifySessionResultDefaultResponse\n >;\n}\n\nexport interface GetLivenessWithVerifySessionAuditEntries {\n /** Gets session requests and response body for the session. */\n get(\n options?: GetLivenessWithVerifySessionAuditEntriesParameters,\n ): StreamableMethod<\n | GetLivenessWithVerifySessionAuditEntries200Response\n | GetLivenessWithVerifySessionAuditEntriesDefaultResponse\n >;\n}\n\nexport interface GetSessionImage {\n /** Get session image stored during the liveness session. */\n get(\n options?: GetSessionImageParameters,\n ): StreamableMethod<GetSessionImage200Response | GetSessionImageDefaultResponse>;\n}\n\nexport interface CreatePerson {\n /** Creates a new person in a Person Directory. To add face to this person, please call Person Directory \"Add Person Face\". */\n post(\n options: CreatePersonParameters,\n ): StreamableMethod<CreatePerson202Response | CreatePersonDefaultResponse>;\n /**\n * Persons are stored in alphabetical order of personId created in Person Directory \"Create Person\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetPersonsParameters,\n ): StreamableMethod<GetPersons200Response | GetPersonsDefaultResponse>;\n}\n\nexport interface DeletePerson {\n /** Delete an existing person from Person Directory. The persistedFaceId(s), userData, person name and face feature(s) in the person entry will all be deleted. */\n delete(\n options?: DeletePersonParameters,\n ): StreamableMethod<DeletePerson202Response | DeletePersonDefaultResponse>;\n /** Retrieve a person's name and userData from Person Directory. */\n get(\n options?: GetPersonParameters,\n ): StreamableMethod<GetPerson200Response | GetPersonDefaultResponse>;\n /** Update name or userData of a person. */\n patch(\n options: UpdatePersonParameters,\n ): StreamableMethod<UpdatePerson200Response | UpdatePersonDefaultResponse>;\n}\n\nexport interface GetDynamicPersonGroupReferences {\n /**\n * Dynamic Person Groups are stored in alphabetical order of Dynamic Person Group ID created in Person Directory \"Create Dynamic Person Group\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetDynamicPersonGroupReferencesParameters,\n ): StreamableMethod<\n GetDynamicPersonGroupReferences200Response | GetDynamicPersonGroupReferencesDefaultResponse\n >;\n}\n\nexport interface AddPersonFace {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until Person Directory \"Delete Person Face\" or \"Delete Person\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. 
Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.\n * * This is a long running operation. Use Response Header \"Operation-Location\" to determine when the AddFace operation has successfully propagated for future requests to \"Identify\". For further information about Operation-Locations see \"Get Face Operation Status\".\n */\n post(\n options: AddPersonFaceParameters,\n ): StreamableMethod<AddPersonFace202Response | AddPersonFaceDefaultResponse>;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until Person Directory \"Delete Person Face\" or \"Delete Person\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. 
If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel.\n * * This is a long running operation. Use Response Header \"Operation-Location\" to determine when the AddFace operation has successfully propagated for future requests to \"Identify\". For further information about Operation-Locations see \"Get Face Operation Status\".\n */\n post(\n options: AddPersonFaceFromUrlParameters,\n ): StreamableMethod<AddPersonFaceFromUrl202Response | AddPersonFaceFromUrlDefaultResponse>;\n /** Retrieve a person's persistedFaceIds representing the registered person face feature(s). */\n get(\n options?: GetPersonFacesParameters,\n ): StreamableMethod<GetPersonFaces200Response | GetPersonFacesDefaultResponse>;\n}\n\nexport interface DeletePersonFace {\n /** Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel. */\n delete(\n options?: DeletePersonFaceParameters,\n ): StreamableMethod<DeletePersonFace202Response | DeletePersonFaceDefaultResponse>;\n /** Retrieve person face information. The persisted person face is specified by its personId. recognitionModel, and persistedFaceId. 
*/\n get(\n options?: GetPersonFaceParameters,\n ): StreamableMethod<GetPersonFace200Response | GetPersonFaceDefaultResponse>;\n /** Update a persisted face's userData field of a person. */\n patch(\n options: UpdatePersonFaceParameters,\n ): StreamableMethod<UpdatePersonFace200Response | UpdatePersonFaceDefaultResponse>;\n}\n\nexport interface CreateDynamicPersonGroupWithPerson {\n /**\n * A Dynamic Person Group is a container that references Person Directory \"Create Person\". After creation, use Person Directory \"Update Dynamic Person Group\" to add/remove persons to/from the Dynamic Person Group.\n *\n * Dynamic Person Group and user data will be stored on server until Person Directory \"Delete Dynamic Person Group\" is called. Use \"Identify From Dynamic Person Group\" with the dynamicPersonGroupId parameter to identify against persons.\n *\n * No image will be stored. Only the person's extracted face feature(s) and userData will be stored on server until Person Directory \"Delete Person\" or \"Delete Person Face\" is called.\n *\n * 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person Groups are references to Person Directory \"Create Person\" and therefore work with most all 'recognitionModels'. The faceId's provided during \"Identify\" determine the 'recognitionModel' used.\n */\n put(\n options: CreateDynamicPersonGroupWithPersonParameters,\n ): StreamableMethod<\n | CreateDynamicPersonGroupWithPerson202Response\n | CreateDynamicPersonGroupWithPersonDefaultResponse\n >;\n /**\n * A Dynamic Person Group is a container that references Person Directory \"Create Person\". After creation, use Person Directory \"Update Dynamic Person Group\" to add/remove persons to/from the Dynamic Person Group.\n *\n * Dynamic Person Group and user data will be stored on server until Person Directory \"Delete Dynamic Person Group\" is called. 
Use \"Identify From Dynamic Person Group\" with the dynamicPersonGroupId parameter to identify against persons.\n *\n * No image will be stored. Only the person's extracted face feature(s) and userData will be stored on server until Person Directory \"Delete Person\" or \"Delete Person Face\" is called.\n *\n * 'recognitionModel' does not need to be specified with Dynamic Person Groups. Dynamic Person Groups are references to Person Directory \"Create Person\" and therefore work with most all 'recognitionModels'. The faceId's provided during \"Identify\" determine the 'recognitionModel' used.\n */\n put(\n options: CreateDynamicPersonGroupParameters,\n ): StreamableMethod<\n CreateDynamicPersonGroup200Response | CreateDynamicPersonGroupDefaultResponse\n >;\n /** Deleting this Dynamic Person Group only delete the references to persons data. To delete actual person see Person Directory \"Delete Person\". */\n delete(\n options?: DeleteDynamicPersonGroupParameters,\n ): StreamableMethod<\n DeleteDynamicPersonGroup202Response | DeleteDynamicPersonGroupDefaultResponse\n >;\n /** This API returns Dynamic Person Group information only, use Person Directory \"Get Dynamic Person Group Persons\" instead to retrieve person information under the Dynamic Person Group. */\n get(\n options?: GetDynamicPersonGroupParameters,\n ): StreamableMethod<GetDynamicPersonGroup200Response | GetDynamicPersonGroupDefaultResponse>;\n /** The properties keep unchanged if they are not in request body. */\n patch(\n options: UpdateDynamicPersonGroupWithPersonChangesParameters,\n ): StreamableMethod<\n | UpdateDynamicPersonGroupWithPersonChanges202Response\n | UpdateDynamicPersonGroupWithPersonChangesDefaultResponse\n >;\n /** The properties keep unchanged if they are not in request body. 
*/\n patch(\n options: UpdateDynamicPersonGroupParameters,\n ): StreamableMethod<\n UpdateDynamicPersonGroup200Response | UpdateDynamicPersonGroupDefaultResponse\n >;\n}\n\nexport interface GetDynamicPersonGroups {\n /**\n * Dynamic Person Groups are stored in alphabetical order of dynamicPersonGroupId.\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetDynamicPersonGroupsParameters,\n ): StreamableMethod<GetDynamicPersonGroups200Response | GetDynamicPersonGroupsDefaultResponse>;\n}\n\nexport interface GetDynamicPersonGroupPersons {\n /**\n * Persons are stored in alphabetical order of personId created in Person Directory \"Create Person\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetDynamicPersonGroupPersonsParameters,\n ): StreamableMethod<\n GetDynamicPersonGroupPersons200Response | GetDynamicPersonGroupPersonsDefaultResponse\n >;\n}\n\nexport interface Routes {\n /** Resource for '/operations/\\{operationId\\}' has methods for the following verbs: get */\n (path: \"/operations/{operationId}\", operationId: string): GetOperationResult;\n /** Resource for '/detect' has methods for the following verbs: post */\n (path: \"/detect\"): DetectFromUrl;\n /** Resource for '/findsimilars' has methods for the following verbs: post */\n (path: \"/findsimilars\"): FindSimilar;\n /** Resource for '/identify' has methods for the following verbs: post */\n (path: \"/identify\"): IdentifyFromPersonGroup;\n /** Resource for '/verify' has methods for the following verbs: post */\n (path: \"/verify\"): VerifyFaceToFace;\n /** Resource for '/group' has methods for the following verbs: post */\n (path: \"/group\"): Group;\n /** Resource for '/facelists/\\{faceListId\\}' has methods for the following verbs: put, delete, get, patch */\n (path: \"/facelists/{faceListId}\", faceListId: string): CreateFaceList;\n /** Resource for '/facelists' has methods for the following verbs: get */\n (path: \"/facelists\"): GetFaceLists;\n /** Resource for '/facelists/\\{faceListId\\}/persistedfaces' has methods for the following verbs: post */\n (path: \"/facelists/{faceListId}/persistedfaces\", faceListId: string): AddFaceListFaceFromUrl;\n /** Resource for 
'/facelists/\\{faceListId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete */\n (\n path: \"/facelists/{faceListId}/persistedfaces/{persistedFaceId}\",\n faceListId: string,\n persistedFaceId: string,\n ): DeleteFaceListFace;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}' has methods for the following verbs: put, delete, get, patch */\n (path: \"/largefacelists/{largeFaceListId}\", largeFaceListId: string): CreateLargeFaceList;\n /** Resource for '/largefacelists' has methods for the following verbs: get */\n (path: \"/largefacelists\"): GetLargeFaceLists;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/training' has methods for the following verbs: get */\n (\n path: \"/largefacelists/{largeFaceListId}/training\",\n largeFaceListId: string,\n ): GetLargeFaceListTrainingStatus;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/train' has methods for the following verbs: post */\n (path: \"/largefacelists/{largeFaceListId}/train\", largeFaceListId: string): TrainLargeFaceList;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/persistedfaces' has methods for the following verbs: post, get */\n (\n path: \"/largefacelists/{largeFaceListId}/persistedfaces\",\n largeFaceListId: string,\n ): AddLargeFaceListFaceFromUrl;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/largefacelists/{largeFaceListId}/persistedfaces/{persistedFaceId}\",\n largeFaceListId: string,\n persistedFaceId: string,\n ): DeleteLargeFaceListFace;\n /** Resource for '/persongroups/\\{personGroupId\\}' has methods for the following verbs: put, delete, get, patch */\n (path: \"/persongroups/{personGroupId}\", personGroupId: string): CreatePersonGroup;\n /** Resource for '/persongroups' has methods for the following verbs: get */\n (path: \"/persongroups\"): GetPersonGroups;\n /** Resource for 
'/persongroups/\\{personGroupId\\}/training' has methods for the following verbs: get */\n (\n path: \"/persongroups/{personGroupId}/training\",\n personGroupId: string,\n ): GetPersonGroupTrainingStatus;\n /** Resource for '/persongroups/\\{personGroupId\\}/train' has methods for the following verbs: post */\n (path: \"/persongroups/{personGroupId}/train\", personGroupId: string): TrainPersonGroup;\n /** Resource for '/persongroups/\\{personGroupId\\}/persons' has methods for the following verbs: post, get */\n (path: \"/persongroups/{personGroupId}/persons\", personGroupId: string): CreatePersonGroupPerson;\n /** Resource for '/persongroups/\\{personGroupId\\}/persons/\\{personId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/persongroups/{personGroupId}/persons/{personId}\",\n personGroupId: string,\n personId: string,\n ): DeletePersonGroupPerson;\n /** Resource for '/persongroups/\\{personGroupId\\}/persons/\\{personId\\}/persistedfaces' has methods for the following verbs: post */\n (\n path: \"/persongroups/{personGroupId}/persons/{personId}/persistedfaces\",\n personGroupId: string,\n personId: string,\n ): AddPersonGroupPersonFaceFromUrl;\n /** Resource for '/persongroups/\\{personGroupId\\}/persons/\\{personId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/persongroups/{personGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}\",\n personGroupId: string,\n personId: string,\n persistedFaceId: string,\n ): DeletePersonGroupPersonFace;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}' has methods for the following verbs: put, delete, get, patch */\n (\n path: \"/largepersongroups/{largePersonGroupId}\",\n largePersonGroupId: string,\n ): CreateLargePersonGroup;\n /** Resource for '/largepersongroups' has methods for the following verbs: get */\n (path: \"/largepersongroups\"): GetLargePersonGroups;\n /** Resource for 
'/largepersongroups/\\{largePersonGroupId\\}/training' has methods for the following verbs: get */\n (\n path: \"/largepersongroups/{largePersonGroupId}/training\",\n largePersonGroupId: string,\n ): GetLargePersonGroupTrainingStatus;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/train' has methods for the following verbs: post */\n (\n path: \"/largepersongroups/{largePersonGroupId}/train\",\n largePersonGroupId: string,\n ): TrainLargePersonGroup;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons' has methods for the following verbs: post, get */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons\",\n largePersonGroupId: string,\n ): CreateLargePersonGroupPerson;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons/\\{personId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons/{personId}\",\n largePersonGroupId: string,\n personId: string,\n ): DeleteLargePersonGroupPerson;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons/\\{personId\\}/persistedfaces' has methods for the following verbs: post */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces\",\n largePersonGroupId: string,\n personId: string,\n ): AddLargePersonGroupPersonFaceFromUrl;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons/\\{personId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}\",\n largePersonGroupId: string,\n personId: string,\n persistedFaceId: string,\n ): DeleteLargePersonGroupPersonFace;\n /** Resource for '/detectLiveness/singleModal/sessions' has methods for the following verbs: post, get */\n (path: \"/detectLiveness/singleModal/sessions\"): CreateLivenessSession;\n /** Resource for 
'/detectLiveness/singleModal/sessions/\\{sessionId\\}' has methods for the following verbs: delete, get */\n (\n path: \"/detectLiveness/singleModal/sessions/{sessionId}\",\n sessionId: string,\n ): DeleteLivenessSession;\n /** Resource for '/detectLiveness/singleModal/sessions/\\{sessionId\\}/audit' has methods for the following verbs: get */\n (\n path: \"/detectLiveness/singleModal/sessions/{sessionId}/audit\",\n sessionId: string,\n ): GetLivenessSessionAuditEntries;\n /** Resource for '/detectLivenessWithVerify/singleModal/sessions' has methods for the following verbs: post, get */\n (\n path: \"/detectLivenessWithVerify/singleModal/sessions\",\n ): CreateLivenessWithVerifySessionWithVerifyImage;\n /** Resource for '/detectLivenessWithVerify/singleModal/sessions/\\{sessionId\\}' has methods for the following verbs: delete, get */\n (\n path: \"/detectLivenessWithVerify/singleModal/sessions/{sessionId}\",\n sessionId: string,\n ): DeleteLivenessWithVerifySession;\n /** Resource for '/detectLivenessWithVerify/singleModal/sessions/\\{sessionId\\}/audit' has methods for the following verbs: get */\n (\n path: \"/detectLivenessWithVerify/singleModal/sessions/{sessionId}/audit\",\n sessionId: string,\n ): GetLivenessWithVerifySessionAuditEntries;\n /** Resource for '/session/sessionImages/\\{sessionImageId\\}' has methods for the following verbs: get */\n (path: \"/session/sessionImages/{sessionImageId}\", sessionImageId: string): GetSessionImage;\n /** Resource for '/persons' has methods for the following verbs: post, get */\n (path: \"/persons\"): CreatePerson;\n /** Resource for '/persons/\\{personId\\}' has methods for the following verbs: delete, get, patch */\n (path: \"/persons/{personId}\", personId: string): DeletePerson;\n /** Resource for '/persons/\\{personId\\}/dynamicPersonGroupReferences' has methods for the following verbs: get */\n (\n path: \"/persons/{personId}/dynamicPersonGroupReferences\",\n personId: string,\n ): 
GetDynamicPersonGroupReferences;\n /** Resource for '/persons/\\{personId\\}/recognitionModels/\\{recognitionModel\\}/persistedfaces' has methods for the following verbs: post, get */\n (\n path: \"/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces\",\n personId: string,\n recognitionModel: RecognitionModel,\n ): AddPersonFace;\n /** Resource for '/persons/\\{personId\\}/recognitionModels/\\{recognitionModel\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/persons/{personId}/recognitionModels/{recognitionModel}/persistedfaces/{persistedFaceId}\",\n personId: string,\n recognitionModel: RecognitionModel,\n persistedFaceId: string,\n ): DeletePersonFace;\n /** Resource for '/dynamicpersongroups/\\{dynamicPersonGroupId\\}' has methods for the following verbs: put, delete, get, patch */\n (\n path: \"/dynamicpersongroups/{dynamicPersonGroupId}\",\n dynamicPersonGroupId: string,\n ): CreateDynamicPersonGroupWithPerson;\n /** Resource for '/dynamicpersongroups' has methods for the following verbs: get */\n (path: \"/dynamicpersongroups\"): GetDynamicPersonGroups;\n /** Resource for '/dynamicpersongroups/\\{dynamicPersonGroupId\\}/persons' has methods for the following verbs: get */\n (\n path: \"/dynamicpersongroups/{dynamicPersonGroupId}/persons\",\n dynamicPersonGroupId: string,\n ): GetDynamicPersonGroupPersons;\n}\n\nexport type FaceClient = Client & {\n path: Routes;\n};\n"]}
|
|
1
|
+
{"version":3,"file":"clientDefinitions.js","sourceRoot":"","sources":["../../src/clientDefinitions.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport type {\n DetectFromUrlParameters,\n DetectParameters,\n DetectFromSessionImageIdParameters,\n FindSimilarParameters,\n FindSimilarFromFaceListParameters,\n FindSimilarFromLargeFaceListParameters,\n IdentifyFromPersonGroupParameters,\n IdentifyFromLargePersonGroupParameters,\n IdentifyFromPersonDirectoryParameters,\n IdentifyFromDynamicPersonGroupParameters,\n VerifyFaceToFaceParameters,\n VerifyFromPersonGroupParameters,\n VerifyFromLargePersonGroupParameters,\n VerifyFromPersonDirectoryParameters,\n GroupParameters,\n CreateFaceListParameters,\n DeleteFaceListParameters,\n GetFaceListParameters,\n UpdateFaceListParameters,\n GetFaceListsParameters,\n AddFaceListFaceFromUrlParameters,\n AddFaceListFaceParameters,\n DeleteFaceListFaceParameters,\n CreateLargeFaceListParameters,\n DeleteLargeFaceListParameters,\n GetLargeFaceListParameters,\n UpdateLargeFaceListParameters,\n GetLargeFaceListsParameters,\n GetLargeFaceListTrainingStatusParameters,\n TrainLargeFaceListParameters,\n AddLargeFaceListFaceFromUrlParameters,\n AddLargeFaceListFaceParameters,\n GetLargeFaceListFacesParameters,\n DeleteLargeFaceListFaceParameters,\n GetLargeFaceListFaceParameters,\n UpdateLargeFaceListFaceParameters,\n CreatePersonGroupParameters,\n DeletePersonGroupParameters,\n GetPersonGroupParameters,\n UpdatePersonGroupParameters,\n GetPersonGroupsParameters,\n GetPersonGroupTrainingStatusParameters,\n TrainPersonGroupParameters,\n CreatePersonGroupPersonParameters,\n GetPersonGroupPersonsParameters,\n DeletePersonGroupPersonParameters,\n GetPersonGroupPersonParameters,\n UpdatePersonGroupPersonParameters,\n AddPersonGroupPersonFaceFromUrlParameters,\n AddPersonGroupPersonFaceParameters,\n DeletePersonGroupPersonFaceParameters,\n 
GetPersonGroupPersonFaceParameters,\n UpdatePersonGroupPersonFaceParameters,\n CreateLargePersonGroupParameters,\n DeleteLargePersonGroupParameters,\n GetLargePersonGroupParameters,\n UpdateLargePersonGroupParameters,\n GetLargePersonGroupsParameters,\n GetLargePersonGroupTrainingStatusParameters,\n TrainLargePersonGroupParameters,\n CreateLargePersonGroupPersonParameters,\n GetLargePersonGroupPersonsParameters,\n DeleteLargePersonGroupPersonParameters,\n GetLargePersonGroupPersonParameters,\n UpdateLargePersonGroupPersonParameters,\n AddLargePersonGroupPersonFaceFromUrlParameters,\n AddLargePersonGroupPersonFaceParameters,\n DeleteLargePersonGroupPersonFaceParameters,\n GetLargePersonGroupPersonFaceParameters,\n UpdateLargePersonGroupPersonFaceParameters,\n CreateLivenessSessionParameters,\n DeleteLivenessSessionParameters,\n GetLivenessSessionResultParameters,\n CreateLivenessWithVerifySessionParameters,\n DeleteLivenessWithVerifySessionParameters,\n GetLivenessWithVerifySessionResultParameters,\n GetSessionImageParameters,\n} from \"./parameters.js\";\nimport type {\n DetectFromUrl200Response,\n DetectFromUrlDefaultResponse,\n Detect200Response,\n DetectDefaultResponse,\n DetectFromSessionImageId200Response,\n DetectFromSessionImageIdDefaultResponse,\n FindSimilar200Response,\n FindSimilarDefaultResponse,\n FindSimilarFromFaceList200Response,\n FindSimilarFromFaceListDefaultResponse,\n FindSimilarFromLargeFaceList200Response,\n FindSimilarFromLargeFaceListDefaultResponse,\n IdentifyFromPersonGroup200Response,\n IdentifyFromPersonGroupDefaultResponse,\n IdentifyFromLargePersonGroup200Response,\n IdentifyFromLargePersonGroupDefaultResponse,\n IdentifyFromPersonDirectory200Response,\n IdentifyFromPersonDirectoryDefaultResponse,\n IdentifyFromDynamicPersonGroup200Response,\n IdentifyFromDynamicPersonGroupDefaultResponse,\n VerifyFaceToFace200Response,\n VerifyFaceToFaceDefaultResponse,\n VerifyFromPersonGroup200Response,\n VerifyFromPersonGroupDefaultResponse,\n 
VerifyFromLargePersonGroup200Response,\n VerifyFromLargePersonGroupDefaultResponse,\n VerifyFromPersonDirectory200Response,\n VerifyFromPersonDirectoryDefaultResponse,\n Group200Response,\n GroupDefaultResponse,\n CreateFaceList200Response,\n CreateFaceListDefaultResponse,\n DeleteFaceList200Response,\n DeleteFaceListDefaultResponse,\n GetFaceList200Response,\n GetFaceListDefaultResponse,\n UpdateFaceList200Response,\n UpdateFaceListDefaultResponse,\n GetFaceLists200Response,\n GetFaceListsDefaultResponse,\n AddFaceListFaceFromUrl200Response,\n AddFaceListFaceFromUrlDefaultResponse,\n AddFaceListFace200Response,\n AddFaceListFaceDefaultResponse,\n DeleteFaceListFace200Response,\n DeleteFaceListFaceDefaultResponse,\n CreateLargeFaceList200Response,\n CreateLargeFaceListDefaultResponse,\n DeleteLargeFaceList200Response,\n DeleteLargeFaceListDefaultResponse,\n GetLargeFaceList200Response,\n GetLargeFaceListDefaultResponse,\n UpdateLargeFaceList200Response,\n UpdateLargeFaceListDefaultResponse,\n GetLargeFaceLists200Response,\n GetLargeFaceListsDefaultResponse,\n GetLargeFaceListTrainingStatus200Response,\n GetLargeFaceListTrainingStatusDefaultResponse,\n TrainLargeFaceList202Response,\n TrainLargeFaceListDefaultResponse,\n AddLargeFaceListFaceFromUrl200Response,\n AddLargeFaceListFaceFromUrlDefaultResponse,\n AddLargeFaceListFace200Response,\n AddLargeFaceListFaceDefaultResponse,\n GetLargeFaceListFaces200Response,\n GetLargeFaceListFacesDefaultResponse,\n DeleteLargeFaceListFace200Response,\n DeleteLargeFaceListFaceDefaultResponse,\n GetLargeFaceListFace200Response,\n GetLargeFaceListFaceDefaultResponse,\n UpdateLargeFaceListFace200Response,\n UpdateLargeFaceListFaceDefaultResponse,\n CreatePersonGroup200Response,\n CreatePersonGroupDefaultResponse,\n DeletePersonGroup200Response,\n DeletePersonGroupDefaultResponse,\n GetPersonGroup200Response,\n GetPersonGroupDefaultResponse,\n UpdatePersonGroup200Response,\n UpdatePersonGroupDefaultResponse,\n 
GetPersonGroups200Response,\n GetPersonGroupsDefaultResponse,\n GetPersonGroupTrainingStatus200Response,\n GetPersonGroupTrainingStatusDefaultResponse,\n TrainPersonGroup202Response,\n TrainPersonGroupDefaultResponse,\n CreatePersonGroupPerson200Response,\n CreatePersonGroupPersonDefaultResponse,\n GetPersonGroupPersons200Response,\n GetPersonGroupPersonsDefaultResponse,\n DeletePersonGroupPerson200Response,\n DeletePersonGroupPersonDefaultResponse,\n GetPersonGroupPerson200Response,\n GetPersonGroupPersonDefaultResponse,\n UpdatePersonGroupPerson200Response,\n UpdatePersonGroupPersonDefaultResponse,\n AddPersonGroupPersonFaceFromUrl200Response,\n AddPersonGroupPersonFaceFromUrlDefaultResponse,\n AddPersonGroupPersonFace200Response,\n AddPersonGroupPersonFaceDefaultResponse,\n DeletePersonGroupPersonFace200Response,\n DeletePersonGroupPersonFaceDefaultResponse,\n GetPersonGroupPersonFace200Response,\n GetPersonGroupPersonFaceDefaultResponse,\n UpdatePersonGroupPersonFace200Response,\n UpdatePersonGroupPersonFaceDefaultResponse,\n CreateLargePersonGroup200Response,\n CreateLargePersonGroupDefaultResponse,\n DeleteLargePersonGroup200Response,\n DeleteLargePersonGroupDefaultResponse,\n GetLargePersonGroup200Response,\n GetLargePersonGroupDefaultResponse,\n UpdateLargePersonGroup200Response,\n UpdateLargePersonGroupDefaultResponse,\n GetLargePersonGroups200Response,\n GetLargePersonGroupsDefaultResponse,\n GetLargePersonGroupTrainingStatus200Response,\n GetLargePersonGroupTrainingStatusDefaultResponse,\n TrainLargePersonGroup202Response,\n TrainLargePersonGroupDefaultResponse,\n CreateLargePersonGroupPerson200Response,\n CreateLargePersonGroupPersonDefaultResponse,\n GetLargePersonGroupPersons200Response,\n GetLargePersonGroupPersonsDefaultResponse,\n DeleteLargePersonGroupPerson200Response,\n DeleteLargePersonGroupPersonDefaultResponse,\n GetLargePersonGroupPerson200Response,\n GetLargePersonGroupPersonDefaultResponse,\n UpdateLargePersonGroupPerson200Response,\n 
UpdateLargePersonGroupPersonDefaultResponse,\n AddLargePersonGroupPersonFaceFromUrl200Response,\n AddLargePersonGroupPersonFaceFromUrlDefaultResponse,\n AddLargePersonGroupPersonFace200Response,\n AddLargePersonGroupPersonFaceDefaultResponse,\n DeleteLargePersonGroupPersonFace200Response,\n DeleteLargePersonGroupPersonFaceDefaultResponse,\n GetLargePersonGroupPersonFace200Response,\n GetLargePersonGroupPersonFaceDefaultResponse,\n UpdateLargePersonGroupPersonFace200Response,\n UpdateLargePersonGroupPersonFaceDefaultResponse,\n CreateLivenessSession200Response,\n CreateLivenessSessionDefaultResponse,\n DeleteLivenessSession204Response,\n DeleteLivenessSessionDefaultResponse,\n GetLivenessSessionResult200Response,\n GetLivenessSessionResultDefaultResponse,\n CreateLivenessWithVerifySession200Response,\n CreateLivenessWithVerifySessionDefaultResponse,\n DeleteLivenessWithVerifySession204Response,\n DeleteLivenessWithVerifySessionDefaultResponse,\n GetLivenessWithVerifySessionResult200Response,\n GetLivenessWithVerifySessionResultDefaultResponse,\n GetSessionImage200Response,\n GetSessionImageDefaultResponse,\n} from \"./responses.js\";\nimport type { Client, StreamableMethod } from \"@azure-rest/core-client\";\n\nexport interface DetectFromUrl {\n /**\n * > [!IMPORTANT]\n * > Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. The limited capabilities are age, smile, facial hair, hair and makeup. Email [Azure Face API](mailto:azureface@microsoft.com) if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision [here](https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/).\n *\n * *\n * * No image will be stored. 
Only the extracted face feature(s) will be stored on server. The faceId is an identifier of the face feature and will be used in \"Identify\", \"Verify\", and \"Find Similar\". The stored face features will expire and be deleted at the time specified by faceIdTimeToLive after the original detection call.\n * * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some of the results returned for specific attributes may not be highly accurate.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n * * For optimal results when querying \"Identify\", \"Verify\", and \"Find Similar\" ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n * * Different 'detectionModel' values can be provided. The availability of landmarks and supported attributes depends on the detection model specified. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Different 'recognitionModel' values are provided. If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. 
Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model).\n */\n post(\n options: DetectFromUrlParameters,\n ): StreamableMethod<DetectFromUrl200Response | DetectFromUrlDefaultResponse>;\n /**\n * > [!IMPORTANT]\n * > Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. The limited capabilities are age, smile, facial hair, hair and makeup. Email [Azure Face API](mailto:azureface@microsoft.com) if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision [here](https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/).\n *\n * *\n * * No image will be stored. Only the extracted face feature(s) will be stored on server. The faceId is an identifier of the face feature and will be used in \"Identify\", \"Verify\", and \"Find Similar\". The stored face features will expire and be deleted at the time specified by faceIdTimeToLive after the original detection call.\n * * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some of the results returned for specific attributes may not be highly accurate.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. 
Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n * * For optimal results when querying \"Identify\", \"Verify\", and \"Find Similar\" ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n * * Different 'detectionModel' values can be provided. The availability of landmarks and supported attributes depends on the detection model specified. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Different 'recognitionModel' values are provided. If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model).\n */\n post(options: DetectParameters): StreamableMethod<Detect200Response | DetectDefaultResponse>;\n /**\n * > [!IMPORTANT]\n * > Microsoft has retired or limited facial recognition capabilities that can be used to try to infer emotional states and identity attributes which, if misused, can subject people to stereotyping, discrimination or unfair denial of services. The retired capabilities are emotion and gender. The limited capabilities are age, smile, facial hair, hair and makeup. 
Email [Azure Face API](mailto:azureface@microsoft.com) if you have a responsible use case that would benefit from the use of any of the limited capabilities. Read more about this decision [here](https://azure.microsoft.com/blog/responsible-ai-investments-and-safeguards-for-facial-recognition/).\n *\n * *\n * * No image will be stored. Only the extracted face feature(s) will be stored on server. The faceId is an identifier of the face feature and will be used in \"Identify\", \"Verify\", and \"Find Similar\". The stored face features will expire and be deleted at the time specified by faceIdTimeToLive after the original detection call.\n * * Optional parameters include faceId, landmarks, and attributes. Attributes include headPose, glasses, occlusion, accessories, blur, exposure, noise, mask, and qualityForRecognition. Some of the results returned for specific attributes may not be highly accurate.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.\n * * For optimal results when querying \"Identify\", \"Verify\", and \"Find Similar\" ('returnFaceId' is true), please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).\n * * Different 'detectionModel' values can be provided. The availability of landmarks and supported attributes depends on the detection model specified. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n * * Different 'recognitionModel' values are provided. 
If follow-up operations like \"Verify\", \"Identify\", \"Find Similar\" are needed, please specify the recognition model with 'recognitionModel' parameter. The default value for 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-recognition-model).\n */\n post(\n options: DetectFromSessionImageIdParameters,\n ): StreamableMethod<\n DetectFromSessionImageId200Response | DetectFromSessionImageIdDefaultResponse\n >;\n}\n\nexport interface FindSimilar {\n /**\n * Depending on the input the returned similar faces list contains faceIds or persistedFaceIds ranked by similarity.\n *\n * Find similar has two working modes, \"matchPerson\" and \"matchFace\". \"matchPerson\" is the default mode that tries to find faces of the same person as much as possible by using internal same-person thresholds. It is useful to find a known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds. \"matchFace\" mode ignores same-person thresholds and returns ranked similar faces anyway, even if the similarity is low. It can be used in cases like searching celebrity-looking faces.\n *\n * The 'recognitionModel' associated with the query faceId should be the same as the 'recognitionModel' used by the target faceId array.\n */\n post(\n options: FindSimilarParameters,\n ): StreamableMethod<FindSimilar200Response | FindSimilarDefaultResponse>;\n /**\n * Depending on the input the returned similar faces list contains faceIds or persistedFaceIds ranked by similarity.\n *\n * Find similar has two working modes, \"matchPerson\" and \"matchFace\". 
\"matchPerson\" is the default mode that tries to find faces of the same person as much as possible by using internal same-person thresholds. It is useful to find a known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds. \"matchFace\" mode ignores same-person thresholds and returns ranked similar faces anyway, even if the similarity is low. It can be used in cases like searching celebrity-looking faces.\n *\n * The 'recognitionModel' associated with the query faceId should be the same as the 'recognitionModel' used by the target Face List.\n */\n post(\n options: FindSimilarFromFaceListParameters,\n ): StreamableMethod<FindSimilarFromFaceList200Response | FindSimilarFromFaceListDefaultResponse>;\n /**\n * Depending on the input the returned similar faces list contains faceIds or persistedFaceIds ranked by similarity.\n *\n * Find similar has two working modes, \"matchPerson\" and \"matchFace\". \"matchPerson\" is the default mode that tries to find faces of the same person as much as possible by using internal same-person thresholds. It is useful to find a known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds. \"matchFace\" mode ignores same-person thresholds and returns ranked similar faces anyway, even if the similarity is low. 
It can be used in cases like searching celebrity-looking faces.\n *\n * The 'recognitionModel' associated with the query faceId should be the same as the 'recognitionModel' used by the target Large Face List.\n */\n post(\n options: FindSimilarFromLargeFaceListParameters,\n ): StreamableMethod<\n FindSimilarFromLargeFaceList200Response | FindSimilarFromLargeFaceListDefaultResponse\n >;\n}\n\nexport interface IdentifyFromPersonGroup {\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Person Group (given by personGroupId), and return candidate person(s) for that face ranked by similarity confidence. The Person Group should be trained to make it ready for identification. See more in \"Train Person Group\".\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently in the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. 
If no person is identified, the returned candidates will be an empty array.\n * > * Try \"Find Similar\" when you need to find similar faces from a Face List/Large Face List instead of a Person Group.\n * > * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used by the target Person Group.\n */\n post(\n options: IdentifyFromPersonGroupParameters,\n ): StreamableMethod<IdentifyFromPersonGroup200Response | IdentifyFromPersonGroupDefaultResponse>;\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Large Person Group (given by largePersonGroupId), and return candidate person(s) for that face ranked by similarity confidence. The Large Person Group should be trained to make it ready for identification. See more in \"Train Large Person Group\".\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently in the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. 
If no person is identified, the returned candidates will be an empty array.\n * > * Try \"Find Similar\" when you need to find similar faces from a Face List/Large Face List instead of a Person Group/Large Person Group.\n * > * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used by the target Person Group or Large Person Group.\n */\n post(\n options: IdentifyFromLargePersonGroupParameters,\n ): StreamableMethod<\n IdentifyFromLargePersonGroup200Response | IdentifyFromLargePersonGroupDefaultResponse\n >;\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Person Directory Persons (given by personIds), and return candidate person(s) for that face ranked by similarity confidence.\n * Passing personIds with an array with one element \"*\" can perform the operation over the entire person directory.\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently in the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. 
If no person is identified, the returned candidates will be an empty array.\n * > * The Identify operation can only match faces obtained with the same recognition model, which is associated with the query faces.\n */\n post(\n options: IdentifyFromPersonDirectoryParameters,\n ): StreamableMethod<\n IdentifyFromPersonDirectory200Response | IdentifyFromPersonDirectoryDefaultResponse\n >;\n /**\n * For each face in the faceIds array, Face Identify will compute similarities between the query face and all the faces in the Dynamic Person Group (given by dynamicPersonGroupId), and return candidate person(s) for that face ranked by similarity confidence.\n * > [!NOTE]\n * >\n * > *\n * > * The algorithm allows more than one face to be identified independently in the same request, but no more than 10 faces.\n * > * Each person could have more than one face, but no more than 248 faces.\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. If no person is identified, the returned candidates will be an empty array.\n * > * The Identify operation can only match faces obtained with the same recognition model, which is associated with the query faces.\n */\n post(\n options: IdentifyFromDynamicPersonGroupParameters,\n ): StreamableMethod<\n IdentifyFromDynamicPersonGroup200Response | IdentifyFromDynamicPersonGroupDefaultResponse\n >;\n}\n\nexport interface VerifyFaceToFace {\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. 
Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy, please make your own judgment.\n * > * The 'recognitionModel' associated with both faces should be the same.\n */\n post(\n options: VerifyFaceToFaceParameters,\n ): StreamableMethod<VerifyFaceToFace200Response | VerifyFaceToFaceDefaultResponse>;\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy, please make your own judgment.\n * > * The 'recognitionModel' associated with the query face should be the same as the 'recognitionModel' used by the Person Group.\n */\n post(\n options: VerifyFromPersonGroupParameters,\n ): StreamableMethod<VerifyFromPersonGroup200Response | VerifyFromPersonGroupDefaultResponse>;\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy, please make your own judgment.\n * > * The 'recognitionModel' associated with the query face should be the same as the 'recognitionModel' used by the Large Person Group.\n */\n post(\n options: VerifyFromLargePersonGroupParameters,\n ): StreamableMethod<\n VerifyFromLargePersonGroup200Response | VerifyFromLargePersonGroupDefaultResponse\n >;\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Higher face image quality means better identification precision. 
Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * > * For the scenarios that are sensitive to accuracy, please make your own judgment.\n * > * The Verify operation can only match faces obtained with the same recognition model, which is associated with the query face.\n */\n post(\n options: VerifyFromPersonDirectoryParameters,\n ): StreamableMethod<\n VerifyFromPersonDirectory200Response | VerifyFromPersonDirectoryDefaultResponse\n >;\n}\n\nexport interface Group {\n /**\n * >\n * *\n * * The output is one or more disjointed face groups and a messyGroup. A face group contains faces that look similar, often of the same person. Face groups are ranked by group size, i.e. number of faces. Notice that faces belonging to the same person might be split into several groups in the result.\n * * MessyGroup is a special face group containing faces that cannot find any similar counterpart face from original faces. The messyGroup will not appear in the result if all faces found their counterparts.\n * * Group API needs at least 2 candidate faces and 1000 at most. We suggest trying \"Verify Face To Face\" when you only have 2 candidate faces.\n * * The 'recognitionModel' associated with the query faces' faceIds should be the same.\n */\n post(options: GroupParameters): StreamableMethod<Group200Response | GroupDefaultResponse>;\n}\n\nexport interface CreateFaceList {\n /**\n * Up to 64 Face Lists are allowed in one subscription.\n *\n * Face List is a list of faces, up to 1,000 faces, and used by \"Find Similar From Face List\".\n *\n * After creation, user should use \"Add Face List Face\" to import the faces. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Face List\" is called.\n *\n * \"Find Similar\" is used for scenarios like finding celebrity-like faces, similar face filtering, or as a lightweight face identification. 
But if the actual use is to identify a person, please use Person Group / Large Person Group and \"Identify\".\n *\n * Please consider Large Face List when the face number is large. It can support up to 1,000,000 faces.\n */\n put(\n options: CreateFaceListParameters,\n ): StreamableMethod<CreateFaceList200Response | CreateFaceListDefaultResponse>;\n /** Delete a specified Face List. */\n delete(\n options?: DeleteFaceListParameters,\n ): StreamableMethod<DeleteFaceList200Response | DeleteFaceListDefaultResponse>;\n /** Retrieve a Face List's faceListId, name, userData, recognitionModel and faces in the Face List. */\n get(\n options?: GetFaceListParameters,\n ): StreamableMethod<GetFaceList200Response | GetFaceListDefaultResponse>;\n /** Update information of a Face List, including name and userData. */\n patch(\n options: UpdateFaceListParameters,\n ): StreamableMethod<UpdateFaceList200Response | UpdateFaceListDefaultResponse>;\n}\n\nexport interface GetFaceLists {\n /**\n * List Face Lists' faceListId, name, userData and recognitionModel.\n *\n * To get face information inside Face List, use \"Get Face List\".\n */\n get(\n options?: GetFaceListsParameters,\n ): StreamableMethod<GetFaceLists200Response | GetFaceListsDefaultResponse>;\n}\n\nexport interface AddFaceListFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Face List Face\" or \"Delete Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. 
The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddFaceListFaceFromUrlParameters,\n ): StreamableMethod<AddFaceListFaceFromUrl200Response | AddFaceListFaceFromUrlDefaultResponse>;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Face List Face\" or \"Delete Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. 
If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddFaceListFaceParameters,\n ): StreamableMethod<AddFaceListFace200Response | AddFaceListFaceDefaultResponse>;\n}\n\nexport interface DeleteFaceListFace {\n /** Adding/deleting faces to/from the same Face List are processed sequentially and to/from different Face Lists are in parallel. */\n delete(\n options?: DeleteFaceListFaceParameters,\n ): StreamableMethod<DeleteFaceListFace200Response | DeleteFaceListFaceDefaultResponse>;\n}\n\nexport interface CreateLargeFaceList {\n /**\n * Large Face List is a list of faces, up to 1,000,000 faces, and used by \"Find Similar From Large Face List\".\n *\n * After creation, user should use Add Large Face List Face to import the faces and Train Large Face List to make it ready for \"Find Similar\". No image will be stored. Only the extracted face feature(s) will be stored on server until Delete Large Face List is called.\n *\n * \"Find Similar\" is used for scenarios like finding celebrity-like faces, similar face filtering, or as a lightweight face identification. 
But if the actual use is to identify a person, please use Person Group / Large Person Group and \"Identify\".\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 64 Large Face Lists.\n * > * S0-tier subscription quota: 1,000,000 Large Face Lists.\n */\n put(\n options: CreateLargeFaceListParameters,\n ): StreamableMethod<CreateLargeFaceList200Response | CreateLargeFaceListDefaultResponse>;\n /** Adding/deleting faces to/from the same Large Face List are processed sequentially and to/from different Large Face Lists are in parallel. */\n delete(\n options?: DeleteLargeFaceListParameters,\n ): StreamableMethod<DeleteLargeFaceList200Response | DeleteLargeFaceListDefaultResponse>;\n /** Retrieve a Large Face List's largeFaceListId, name, userData and recognitionModel. */\n get(\n options?: GetLargeFaceListParameters,\n ): StreamableMethod<GetLargeFaceList200Response | GetLargeFaceListDefaultResponse>;\n /** Update information of a Large Face List, including name and userData. */\n patch(\n options: UpdateLargeFaceListParameters,\n ): StreamableMethod<UpdateLargeFaceList200Response | UpdateLargeFaceListDefaultResponse>;\n}\n\nexport interface GetLargeFaceLists {\n /**\n * To get face information inside largeFaceList, use \"Get Large Face List Face\".\n *\n * Large Face Lists are stored in alphabetical order of largeFaceListId.\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the largeFaceListId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargeFaceListsParameters,\n ): StreamableMethod<GetLargeFaceLists200Response | GetLargeFaceListsDefaultResponse>;\n}\n\nexport interface GetLargeFaceListTrainingStatus {\n /**\n * To check whether the Large Face List training is completed or still ongoing. Large Face List training is an asynchronous operation triggered by \"Train Large Face List\".\n *\n * Training time depends on the number of face entries in a Large Face List. It could be in seconds, or up to half an hour for 1,000,000 faces.\n */\n get(\n options?: GetLargeFaceListTrainingStatusParameters,\n ): StreamableMethod<\n GetLargeFaceListTrainingStatus200Response | GetLargeFaceListTrainingStatusDefaultResponse\n >;\n}\n\nexport interface TrainLargeFaceList {\n /**\n * Training is a crucial step: only a trained Large Face List can be used by \"Find Similar From Large Face List\".\n *\n * The training task is an asynchronous task. Training time depends on the number of face entries in a Large Face List. It could be in seconds, or up to half an hour for 1,000,000 faces. To check training completion, please use \"Get Large Face List Training Status\".\n */\n post(\n options?: TrainLargeFaceListParameters,\n ): StreamableMethod<TrainLargeFaceList202Response | TrainLargeFaceListDefaultResponse>;\n}\n\nexport interface AddLargeFaceListFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. 
No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Face List Face\" or \"Delete Large Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 faces per Large Face List.\n * > * S0-tier subscription quota: 1,000,000 faces per Large Face List.\n */\n post(\n options: AddLargeFaceListFaceFromUrlParameters,\n ): StreamableMethod<\n AddLargeFaceListFaceFromUrl200Response | AddLargeFaceListFaceFromUrlDefaultResponse\n >;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. 
Only the extracted face feature(s) will be stored on server until \"Delete Large Face List Face\" or \"Delete Large Face List\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 faces per Large Face List.\n * > * S0-tier subscription quota: 1,000,000 faces per Large Face List.\n */\n post(\n options: AddLargeFaceListFaceParameters,\n ): StreamableMethod<AddLargeFaceListFace200Response | AddLargeFaceListFaceDefaultResponse>;\n /**\n * Faces are stored in alphabetical order of persistedFaceId created in \"Add Large Face List Face\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. 
Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, specify \"start\" with the persistedFaceId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargeFaceListFacesParameters,\n ): StreamableMethod<GetLargeFaceListFaces200Response | GetLargeFaceListFacesDefaultResponse>;\n}\n\nexport interface DeleteLargeFaceListFace {\n /** Delete a face from a Large Face List by specified largeFaceListId and persistedFaceId. */\n delete(\n options?: DeleteLargeFaceListFaceParameters,\n ): StreamableMethod<DeleteLargeFaceListFace200Response | DeleteLargeFaceListFaceDefaultResponse>;\n /** Retrieve persisted face in Large Face List by largeFaceListId and persistedFaceId. */\n get(\n options?: GetLargeFaceListFaceParameters,\n ): StreamableMethod<GetLargeFaceListFace200Response | GetLargeFaceListFaceDefaultResponse>;\n /** Update a specified face's userData field in a Large Face List by its persistedFaceId. 
*/\n patch(\n options: UpdateLargeFaceListFaceParameters,\n ): StreamableMethod<UpdateLargeFaceListFace200Response | UpdateLargeFaceListFaceDefaultResponse>;\n}\n\nexport interface CreatePersonGroup {\n /**\n * A Person Group is a container holding the uploaded person data, including face recognition features.\n *\n * After creation, use \"Create Person Group Person\" to add persons into the group, and then call \"Train Person Group\" to get this group ready for \"Identify From Person Group\".\n *\n * No image will be stored. Only the person's extracted face feature(s) and userData will be stored on server until \"Delete Person Group Person\" or \"Delete Person Group\" is called.\n *\n * 'recognitionModel' should be specified to associate with this Person Group. The default value for 'recognitionModel' is 'recognition_01', if the latest model is needed, please explicitly specify the model you need in this parameter. New faces that are added to an existing Person Group will use the recognition model that's already associated with the collection. Existing face feature(s) in a Person Group can't be updated to features extracted by another version of recognition model.\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 Person Groups. Each holds up to 1,000 persons.\n * > * S0-tier subscription quota: 1,000,000 Person Groups. Each holds up to 10,000 persons.\n * > * To handle a larger-scale face identification problem, please consider using Large Person Group.\n */\n put(\n options: CreatePersonGroupParameters,\n ): StreamableMethod<CreatePersonGroup200Response | CreatePersonGroupDefaultResponse>;\n /** Delete an existing Person Group with specified personGroupId. Persisted data in this Person Group will be deleted. */\n delete(\n options?: DeletePersonGroupParameters,\n ): StreamableMethod<DeletePersonGroup200Response | DeletePersonGroupDefaultResponse>;\n /** Retrieve Person Group name, userData and recognitionModel. 
To get person information under this personGroup, use \"Get Person Group Persons\". */\n get(\n options?: GetPersonGroupParameters,\n ): StreamableMethod<GetPersonGroup200Response | GetPersonGroupDefaultResponse>;\n /** Update an existing Person Group's name and userData. The properties keep unchanged if they are not in the request body. */\n patch(\n options: UpdatePersonGroupParameters,\n ): StreamableMethod<UpdatePersonGroup200Response | UpdatePersonGroupDefaultResponse>;\n}\n\nexport interface GetPersonGroups {\n /**\n * Person Groups are stored in alphabetical order of personGroupId.\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, specify \"start\" with the personGroupId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetPersonGroupsParameters,\n ): StreamableMethod<GetPersonGroups200Response | GetPersonGroupsDefaultResponse>;\n}\n\nexport interface GetPersonGroupTrainingStatus {\n /** To check whether Person Group training is completed or still ongoing. Person Group training is an asynchronous operation triggered by \"Train Person Group\" API. 
*/\n get(\n options?: GetPersonGroupTrainingStatusParameters,\n ): StreamableMethod<\n GetPersonGroupTrainingStatus200Response | GetPersonGroupTrainingStatusDefaultResponse\n >;\n}\n\nexport interface TrainPersonGroup {\n /** The training task is an asynchronous task. Training time depends on the number of person entries, and their faces in a Person Group. It could take several seconds to minutes. To check training status, please use \"Get Person Group Training Status\". */\n post(\n options?: TrainPersonGroupParameters,\n ): StreamableMethod<TrainPersonGroup202Response | TrainPersonGroupDefaultResponse>;\n}\n\nexport interface CreatePersonGroupPerson {\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota:\n * > * 1,000 persons in all Person Groups.\n * > * S0-tier subscription quota:\n * > * 10,000 persons per Person Group.\n * > * 1,000,000 Person Groups.\n * > * 100,000,000 persons in all Person Groups.\n */\n post(\n options: CreatePersonGroupPersonParameters,\n ): StreamableMethod<CreatePersonGroupPerson200Response | CreatePersonGroupPersonDefaultResponse>;\n /**\n * Persons are stored in alphabetical order of personId created in \"Create Person Group Person\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetPersonGroupPersonsParameters,\n ): StreamableMethod<GetPersonGroupPersons200Response | GetPersonGroupPersonsDefaultResponse>;\n}\n\nexport interface DeletePersonGroupPerson {\n /** Delete an existing person from a Person Group. The persistedFaceId, userData, person name and face feature(s) in the person entry will all be deleted. */\n delete(\n options?: DeletePersonGroupPersonParameters,\n ): StreamableMethod<DeletePersonGroupPerson200Response | DeletePersonGroupPersonDefaultResponse>;\n /** Retrieve a person's name and userData, and the persisted faceIds representing the registered person face feature(s). */\n get(\n options?: GetPersonGroupPersonParameters,\n ): StreamableMethod<GetPersonGroupPerson200Response | GetPersonGroupPersonDefaultResponse>;\n /** Update name or userData of a person. */\n patch(\n options: UpdatePersonGroupPersonParameters,\n ): StreamableMethod<UpdatePersonGroupPerson200Response | UpdatePersonGroupPersonDefaultResponse>;\n}\n\nexport interface AddPersonGroupPersonFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. 
Only the extracted face feature(s) will be stored on server until \"Delete Person Group Person Face\", \"Delete Person Group Person\" or \"Delete Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddPersonGroupPersonFaceFromUrlParameters,\n ): StreamableMethod<\n AddPersonGroupPersonFaceFromUrl200Response | AddPersonGroupPersonFaceFromUrlDefaultResponse\n >;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. 
Only the extracted face feature(s) will be stored on server until \"Delete Person Group Person Face\", \"Delete Person Group Person\" or \"Delete Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddPersonGroupPersonFaceParameters,\n ): StreamableMethod<\n AddPersonGroupPersonFace200Response | AddPersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface DeletePersonGroupPersonFace {\n /** Adding/deleting faces to/from the same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel. 
*/\n delete(\n options?: DeletePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n DeletePersonGroupPersonFace200Response | DeletePersonGroupPersonFaceDefaultResponse\n >;\n /** Retrieve person face information. The persisted person face is specified by its personGroupId, personId and persistedFaceId. */\n get(\n options?: GetPersonGroupPersonFaceParameters,\n ): StreamableMethod<\n GetPersonGroupPersonFace200Response | GetPersonGroupPersonFaceDefaultResponse\n >;\n /** Update a person persisted face's userData field. */\n patch(\n options: UpdatePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n UpdatePersonGroupPersonFace200Response | UpdatePersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface CreateLargePersonGroup {\n /**\n * A Large Person Group is a container holding the uploaded person data, including the face recognition features. It can hold up to 1,000,000 entities.\n *\n * After creation, use \"Create Large Person Group Person\" to add person into the group, and call \"Train Large Person Group\" to get this group ready for \"Identify From Large Person Group\".\n *\n * No image will be stored. Only the person's extracted face feature(s) and userData will be stored on server until \"Delete Large Person Group Person\" or \"Delete Large Person Group\" is called.\n *\n * 'recognitionModel' should be specified to associate with this Large Person Group. The default value for 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly specify the model you need in this parameter. New faces that are added to an existing Large Person Group will use the recognition model that's already associated with the collection. 
Existing face feature(s) in a Large Person Group can't be updated to features extracted by another version of recognition model.\n *\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota: 1,000 Large Person Groups.\n * > * S0-tier subscription quota: 1,000,000 Large Person Groups.\n */\n put(\n options: CreateLargePersonGroupParameters,\n ): StreamableMethod<CreateLargePersonGroup200Response | CreateLargePersonGroupDefaultResponse>;\n /** Delete an existing Large Person Group with specified personGroupId. Persisted data in this Large Person Group will be deleted. */\n delete(\n options?: DeleteLargePersonGroupParameters,\n ): StreamableMethod<DeleteLargePersonGroup200Response | DeleteLargePersonGroupDefaultResponse>;\n /** Retrieve the information of a Large Person Group, including its name, userData and recognitionModel. This API returns Large Person Group information only, use \"Get Large Person Group Persons\" instead to retrieve person information under the Large Person Group. */\n get(\n options?: GetLargePersonGroupParameters,\n ): StreamableMethod<GetLargePersonGroup200Response | GetLargePersonGroupDefaultResponse>;\n /** Update an existing Large Person Group's name and userData. The properties keep unchanged if they are not in request body. */\n patch(\n options: UpdateLargePersonGroupParameters,\n ): StreamableMethod<UpdateLargePersonGroup200Response | UpdateLargePersonGroupDefaultResponse>;\n}\n\nexport interface GetLargePersonGroups {\n /**\n * Large Person Groups are stored in alphabetical order of largePersonGroupId.\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. 
To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargePersonGroupsParameters,\n ): StreamableMethod<GetLargePersonGroups200Response | GetLargePersonGroupsDefaultResponse>;\n}\n\nexport interface GetLargePersonGroupTrainingStatus {\n /** Training time depends on the number of person entries, and their faces in a Large Person Group. It could be in seconds, or up to half an hour for 1,000,000 persons. */\n get(\n options?: GetLargePersonGroupTrainingStatusParameters,\n ): StreamableMethod<\n GetLargePersonGroupTrainingStatus200Response | GetLargePersonGroupTrainingStatusDefaultResponse\n >;\n}\n\nexport interface TrainLargePersonGroup {\n /** The training task is an asynchronous task. Training time depends on the number of person entries, and their faces in a Large Person Group. It could be in several seconds, or up to half a hour for 1,000,000 persons. To check training status, please use \"Get Large Person Group Training Status\". 
*/\n post(\n options?: TrainLargePersonGroupParameters,\n ): StreamableMethod<TrainLargePersonGroup202Response | TrainLargePersonGroupDefaultResponse>;\n}\n\nexport interface CreateLargePersonGroupPerson {\n /**\n * > [!NOTE]\n * >\n * > *\n * > * Free-tier subscription quota:\n * > * 1,000 persons in all Large Person Groups.\n * > * S0-tier subscription quota:\n * > * 1,000,000 persons per Large Person Group.\n * > * 1,000,000 Large Person Groups.\n * > * 1,000,000,000 persons in all Large Person Groups.\n */\n post(\n options: CreateLargePersonGroupPersonParameters,\n ): StreamableMethod<\n CreateLargePersonGroupPerson200Response | CreateLargePersonGroupPersonDefaultResponse\n >;\n /**\n * Persons are stored in alphabetical order of personId created in \"Create Large Person Group Person\".\n * >\n * *\n * * \"start\" parameter (string, optional) specifies an ID value from which returned entries will have larger IDs based on string comparison. Setting \"start\" to an empty value indicates that entries should be returned starting from the first item.\n * * \"top\" parameter (int, optional) determines the maximum number of entries to be returned, with a limit of up to 1000 entries per call. To retrieve additional entries beyond this limit, specify \"start\" with the personId of the last entry returned in the current call.\n *\n * > [!TIP]\n * >\n * > * For example, there are total 5 items with their IDs: \"itemId1\", ..., \"itemId5\".\n * > * \"start=&top=\" will return all 5 items.\n * > * \"start=&top=2\" will return \"itemId1\", \"itemId2\".\n * > * \"start=itemId2&top=3\" will return \"itemId3\", \"itemId4\", \"itemId5\".\n */\n get(\n options?: GetLargePersonGroupPersonsParameters,\n ): StreamableMethod<\n GetLargePersonGroupPersons200Response | GetLargePersonGroupPersonsDefaultResponse\n >;\n}\n\nexport interface DeleteLargePersonGroupPerson {\n /** Delete an existing person from a Large Person Group. 
The persistedFaceId, userData, person name and face feature(s) in the person entry will all be deleted. */\n delete(\n options?: DeleteLargePersonGroupPersonParameters,\n ): StreamableMethod<\n DeleteLargePersonGroupPerson200Response | DeleteLargePersonGroupPersonDefaultResponse\n >;\n /** Retrieve a person's name and userData, and the persisted faceIds representing the registered person face feature(s). */\n get(\n options?: GetLargePersonGroupPersonParameters,\n ): StreamableMethod<\n GetLargePersonGroupPerson200Response | GetLargePersonGroupPersonDefaultResponse\n >;\n /** Update name or userData of a person. */\n patch(\n options: UpdateLargePersonGroupPersonParameters,\n ): StreamableMethod<\n UpdateLargePersonGroupPerson200Response | UpdateLargePersonGroupPersonDefaultResponse\n >;\n}\n\nexport interface AddLargePersonGroupPersonFaceFromUrl {\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Person Group Person Face\", \"Delete Large Person Group Person\" or \"Delete Large Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. 
If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddLargePersonGroupPersonFaceFromUrlParameters,\n ): StreamableMethod<\n | AddLargePersonGroupPersonFaceFromUrl200Response\n | AddLargePersonGroupPersonFaceFromUrlDefaultResponse\n >;\n /**\n * To deal with an image containing multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature(s) will be stored on server until \"Delete Large Person Group Person Face\", \"Delete Large Person Group Person\" or \"Delete Large Person Group\" is called.\n *\n * Note that persistedFaceId is different from faceId generated by \"Detect\".\n *\n * >\n * *\n * * Each person entry can hold up to 248 faces.\n * * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger.\n * * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.\n * * \"targetFace\" rectangle should contain one face. Zero or multiple faces will be regarded as an error. 
If the provided \"targetFace\" rectangle is not returned from \"Detect\", there's no guarantee to detect and add the face successfully.\n * * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures.\n * * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.\n * * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to [here](https://learn.microsoft.com/azure/ai-services/computer-vision/how-to/specify-detection-model).\n */\n post(\n options: AddLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n AddLargePersonGroupPersonFace200Response | AddLargePersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface DeleteLargePersonGroupPersonFace {\n /** Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel. */\n delete(\n options?: DeleteLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n DeleteLargePersonGroupPersonFace200Response | DeleteLargePersonGroupPersonFaceDefaultResponse\n >;\n /** Retrieve person face information. The persisted person face is specified by its largePersonGroupId, personId and persistedFaceId. */\n get(\n options?: GetLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n GetLargePersonGroupPersonFace200Response | GetLargePersonGroupPersonFaceDefaultResponse\n >;\n /** Update a person persisted face's userData field. 
*/\n patch(\n options: UpdateLargePersonGroupPersonFaceParameters,\n ): StreamableMethod<\n UpdateLargePersonGroupPersonFace200Response | UpdateLargePersonGroupPersonFaceDefaultResponse\n >;\n}\n\nexport interface CreateLivenessSession {\n /**\n * A session is best for client device scenarios where developers want to authorize a client device to perform only a liveness detection without granting full access to their resource. Created sessions have a limited life span and only authorize clients to perform the desired action before access is expired.\n *\n * Permissions includes...\n * >\n * *\n * * Ability to call /detectLiveness/singleModal for up to 3 retries.\n * * A token lifetime of 10 minutes.\n *\n * > [!NOTE]\n * > Client access can be revoked by deleting the session using the Delete Liveness Session operation. To retrieve a result, use the Get Liveness Session. To audit the individual requests that a client has made to your resource, use the List Liveness Session Audit Entries.\n */\n post(\n options: CreateLivenessSessionParameters,\n ): StreamableMethod<CreateLivenessSession200Response | CreateLivenessSessionDefaultResponse>;\n}\n\nexport interface DeleteLivenessSession {\n /**\n * > [!NOTE]\n * > Deleting a session deactivates the Session Auth Token by blocking future API calls made with that Auth Token. While this can be used to remove any access for that token, those requests will still count towards overall resource rate limits. It's best to leverage TokenTTL to limit length of tokens in the case that it is misused.\n */\n delete(\n options?: DeleteLivenessSessionParameters,\n ): StreamableMethod<DeleteLivenessSession204Response | DeleteLivenessSessionDefaultResponse>;\n /** Get session result of detectLiveness/singleModal call. 
*/\n get(\n options?: GetLivenessSessionResultParameters,\n ): StreamableMethod<\n GetLivenessSessionResult200Response | GetLivenessSessionResultDefaultResponse\n >;\n}\n\nexport interface CreateLivenessWithVerifySession {\n /**\n * A session is best for client device scenarios where developers want to authorize a client device to perform only a liveness detection without granting full access to their resource. Created sessions have a limited life span and only authorize clients to perform the desired action before access is expired.\n *\n * Permissions includes...\n * >\n * *\n * * Ability to call /detectLivenessWithVerify/singleModal for up to 3 retries.\n * * A token lifetime of 10 minutes.\n *\n * > [!NOTE]\n * >\n * > *\n * > * Client access can be revoked by deleting the session using the Delete Liveness With Verify Session operation.\n * > * To retrieve a result, use the Get Liveness With Verify Session.\n * > * To audit the individual requests that a client has made to your resource, use the List Liveness With Verify Session Audit Entries.\n */\n post(\n options: CreateLivenessWithVerifySessionParameters,\n ): StreamableMethod<\n CreateLivenessWithVerifySession200Response | CreateLivenessWithVerifySessionDefaultResponse\n >;\n}\n\nexport interface DeleteLivenessWithVerifySession {\n /**\n * > [!NOTE]\n * > Deleting a session deactivates the Session Auth Token by blocking future API calls made with that Auth Token. While this can be used to remove any access for that token, those requests will still count towards overall resource rate limits. It's best to leverage TokenTTL to limit length of tokens in the case that it is misused.\n */\n delete(\n options?: DeleteLivenessWithVerifySessionParameters,\n ): StreamableMethod<\n DeleteLivenessWithVerifySession204Response | DeleteLivenessWithVerifySessionDefaultResponse\n >;\n /** Get session result of detectLivenessWithVerify/singleModal call. 
*/\n get(\n options?: GetLivenessWithVerifySessionResultParameters,\n ): StreamableMethod<\n | GetLivenessWithVerifySessionResult200Response\n | GetLivenessWithVerifySessionResultDefaultResponse\n >;\n}\n\nexport interface GetSessionImage {\n /** Get session image stored during the liveness session. */\n get(\n options?: GetSessionImageParameters,\n ): StreamableMethod<GetSessionImage200Response | GetSessionImageDefaultResponse>;\n}\n\nexport interface Routes {\n /** Resource for '/detect' has methods for the following verbs: post */\n (path: \"/detect\"): DetectFromUrl;\n /** Resource for '/findsimilars' has methods for the following verbs: post */\n (path: \"/findsimilars\"): FindSimilar;\n /** Resource for '/identify' has methods for the following verbs: post */\n (path: \"/identify\"): IdentifyFromPersonGroup;\n /** Resource for '/verify' has methods for the following verbs: post */\n (path: \"/verify\"): VerifyFaceToFace;\n /** Resource for '/group' has methods for the following verbs: post */\n (path: \"/group\"): Group;\n /** Resource for '/facelists/\\{faceListId\\}' has methods for the following verbs: put, delete, get, patch */\n (path: \"/facelists/{faceListId}\", faceListId: string): CreateFaceList;\n /** Resource for '/facelists' has methods for the following verbs: get */\n (path: \"/facelists\"): GetFaceLists;\n /** Resource for '/facelists/\\{faceListId\\}/persistedfaces' has methods for the following verbs: post */\n (path: \"/facelists/{faceListId}/persistedfaces\", faceListId: string): AddFaceListFaceFromUrl;\n /** Resource for '/facelists/\\{faceListId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete */\n (\n path: \"/facelists/{faceListId}/persistedfaces/{persistedFaceId}\",\n faceListId: string,\n persistedFaceId: string,\n ): DeleteFaceListFace;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}' has methods for the following verbs: put, delete, get, patch */\n (path: 
\"/largefacelists/{largeFaceListId}\", largeFaceListId: string): CreateLargeFaceList;\n /** Resource for '/largefacelists' has methods for the following verbs: get */\n (path: \"/largefacelists\"): GetLargeFaceLists;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/training' has methods for the following verbs: get */\n (\n path: \"/largefacelists/{largeFaceListId}/training\",\n largeFaceListId: string,\n ): GetLargeFaceListTrainingStatus;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/train' has methods for the following verbs: post */\n (path: \"/largefacelists/{largeFaceListId}/train\", largeFaceListId: string): TrainLargeFaceList;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/persistedfaces' has methods for the following verbs: post, get */\n (\n path: \"/largefacelists/{largeFaceListId}/persistedfaces\",\n largeFaceListId: string,\n ): AddLargeFaceListFaceFromUrl;\n /** Resource for '/largefacelists/\\{largeFaceListId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/largefacelists/{largeFaceListId}/persistedfaces/{persistedFaceId}\",\n largeFaceListId: string,\n persistedFaceId: string,\n ): DeleteLargeFaceListFace;\n /** Resource for '/persongroups/\\{personGroupId\\}' has methods for the following verbs: put, delete, get, patch */\n (path: \"/persongroups/{personGroupId}\", personGroupId: string): CreatePersonGroup;\n /** Resource for '/persongroups' has methods for the following verbs: get */\n (path: \"/persongroups\"): GetPersonGroups;\n /** Resource for '/persongroups/\\{personGroupId\\}/training' has methods for the following verbs: get */\n (\n path: \"/persongroups/{personGroupId}/training\",\n personGroupId: string,\n ): GetPersonGroupTrainingStatus;\n /** Resource for '/persongroups/\\{personGroupId\\}/train' has methods for the following verbs: post */\n (path: \"/persongroups/{personGroupId}/train\", personGroupId: string): TrainPersonGroup;\n /** 
Resource for '/persongroups/\\{personGroupId\\}/persons' has methods for the following verbs: post, get */\n (path: \"/persongroups/{personGroupId}/persons\", personGroupId: string): CreatePersonGroupPerson;\n /** Resource for '/persongroups/\\{personGroupId\\}/persons/\\{personId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/persongroups/{personGroupId}/persons/{personId}\",\n personGroupId: string,\n personId: string,\n ): DeletePersonGroupPerson;\n /** Resource for '/persongroups/\\{personGroupId\\}/persons/\\{personId\\}/persistedfaces' has methods for the following verbs: post */\n (\n path: \"/persongroups/{personGroupId}/persons/{personId}/persistedfaces\",\n personGroupId: string,\n personId: string,\n ): AddPersonGroupPersonFaceFromUrl;\n /** Resource for '/persongroups/\\{personGroupId\\}/persons/\\{personId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/persongroups/{personGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}\",\n personGroupId: string,\n personId: string,\n persistedFaceId: string,\n ): DeletePersonGroupPersonFace;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}' has methods for the following verbs: put, delete, get, patch */\n (\n path: \"/largepersongroups/{largePersonGroupId}\",\n largePersonGroupId: string,\n ): CreateLargePersonGroup;\n /** Resource for '/largepersongroups' has methods for the following verbs: get */\n (path: \"/largepersongroups\"): GetLargePersonGroups;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/training' has methods for the following verbs: get */\n (\n path: \"/largepersongroups/{largePersonGroupId}/training\",\n largePersonGroupId: string,\n ): GetLargePersonGroupTrainingStatus;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/train' has methods for the following verbs: post */\n (\n path: \"/largepersongroups/{largePersonGroupId}/train\",\n 
largePersonGroupId: string,\n ): TrainLargePersonGroup;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons' has methods for the following verbs: post, get */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons\",\n largePersonGroupId: string,\n ): CreateLargePersonGroupPerson;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons/\\{personId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons/{personId}\",\n largePersonGroupId: string,\n personId: string,\n ): DeleteLargePersonGroupPerson;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons/\\{personId\\}/persistedfaces' has methods for the following verbs: post */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces\",\n largePersonGroupId: string,\n personId: string,\n ): AddLargePersonGroupPersonFaceFromUrl;\n /** Resource for '/largepersongroups/\\{largePersonGroupId\\}/persons/\\{personId\\}/persistedfaces/\\{persistedFaceId\\}' has methods for the following verbs: delete, get, patch */\n (\n path: \"/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}\",\n largePersonGroupId: string,\n personId: string,\n persistedFaceId: string,\n ): DeleteLargePersonGroupPersonFace;\n /** Resource for '/detectLiveness-sessions' has methods for the following verbs: post */\n (path: \"/detectLiveness-sessions\"): CreateLivenessSession;\n /** Resource for '/detectLiveness-sessions/\\{sessionId\\}' has methods for the following verbs: delete, get */\n (path: \"/detectLiveness-sessions/{sessionId}\", sessionId: string): DeleteLivenessSession;\n /** Resource for '/detectLivenessWithVerify-sessions' has methods for the following verbs: post */\n (path: \"/detectLivenessWithVerify-sessions\"): CreateLivenessWithVerifySession;\n /** Resource for '/detectLivenessWithVerify-sessions/\\{sessionId\\}' has methods for the 
following verbs: delete, get */\n (\n path: \"/detectLivenessWithVerify-sessions/{sessionId}\",\n sessionId: string,\n ): DeleteLivenessWithVerifySession;\n /** Resource for '/sessionImages/\\{sessionImageId\\}' has methods for the following verbs: get */\n (path: \"/sessionImages/{sessionImageId}\", sessionImageId: string): GetSessionImage;\n}\n\nexport type FaceClient = Client & {\n path: Routes;\n};\n"]}
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"faceClient.d.ts","sourceRoot":"","sources":["../../src/faceClient.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AAG7D,OAAO,KAAK,EAAE,eAAe,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACvE,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,wBAAwB,CAAC;AACzD,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAC;AAE5C,6CAA6C;AAC7C,MAAM,WAAW,iBAAkB,SAAQ,aAAa;IACtD,kBAAkB;IAClB,UAAU,CAAC,EAAE,QAAQ,CAAC;CACvB;AAED;;;;;;GAMG;AACH,MAAM,CAAC,OAAO,UAAU,YAAY,CAClC,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,eAAe,GAAG,aAAa,EAC5C,EAAE,
|
|
1
|
+
{"version":3,"file":"faceClient.d.ts","sourceRoot":"","sources":["../../src/faceClient.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,yBAAyB,CAAC;AAG7D,OAAO,KAAK,EAAE,eAAe,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACvE,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,wBAAwB,CAAC;AACzD,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAC;AAE5C,6CAA6C;AAC7C,MAAM,WAAW,iBAAkB,SAAQ,aAAa;IACtD,kBAAkB;IAClB,UAAU,CAAC,EAAE,QAAQ,CAAC;CACvB;AAED;;;;;;GAMG;AACH,MAAM,CAAC,OAAO,UAAU,YAAY,CAClC,aAAa,EAAE,MAAM,EACrB,WAAW,EAAE,eAAe,GAAG,aAAa,EAC5C,EAAE,UAAmB,EAAE,GAAG,OAAO,EAAE,GAAE,iBAAsB,GAC1D,UAAU,CA0CZ"}
|
|
@@ -12,7 +12,7 @@ import { logger } from "./logger.js";
|
|
|
12
12
|
*/
|
|
13
13
|
export default function createClient(endpointParam, credentials, _a = {}) {
|
|
14
14
|
var _b, _c, _d, _e, _f, _g, _h, _j;
|
|
15
|
-
var { apiVersion = "v1.2
|
|
15
|
+
var { apiVersion = "v1.2" } = _a, options = __rest(_a, ["apiVersion"]);
|
|
16
16
|
const endpointUrl = (_c = (_b = options.endpoint) !== null && _b !== void 0 ? _b : options.baseUrl) !== null && _c !== void 0 ? _c : `${endpointParam}/face/${apiVersion}`;
|
|
17
17
|
const userAgentInfo = `azsdk-js-ai-vision-face-rest/1.0.0-beta.3`;
|
|
18
18
|
const userAgentPrefix = options.userAgentOptions && options.userAgentOptions.userAgentPrefix
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"faceClient.js","sourceRoot":"","sources":["../../src/faceClient.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;;AAGlC,OAAO,EAAE,SAAS,EAAE,MAAM,yBAAyB,CAAC;AACpD,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AAWrC;;;;;;GAMG;AACH,MAAM,CAAC,OAAO,UAAU,YAAY,CAClC,aAAqB,EACrB,WAA4C,EAC5C,
|
|
1
|
+
{"version":3,"file":"faceClient.js","sourceRoot":"","sources":["../../src/faceClient.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;;AAGlC,OAAO,EAAE,SAAS,EAAE,MAAM,yBAAyB,CAAC;AACpD,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AAWrC;;;;;;GAMG;AACH,MAAM,CAAC,OAAO,UAAU,YAAY,CAClC,aAAqB,EACrB,WAA4C,EAC5C,KAAyD,EAAE;;QAA3D,EAAE,UAAU,GAAG,MAAM,OAAsC,EAAjC,OAAO,cAAjC,cAAmC,CAAF;IAEjC,MAAM,WAAW,GAAG,MAAA,MAAA,OAAO,CAAC,QAAQ,mCAAI,OAAO,CAAC,OAAO,mCAAI,GAAG,aAAa,SAAS,UAAU,EAAE,CAAC;IACjG,MAAM,aAAa,GAAG,2CAA2C,CAAC;IAClE,MAAM,eAAe,GACnB,OAAO,CAAC,gBAAgB,IAAI,OAAO,CAAC,gBAAgB,CAAC,eAAe;QAClE,CAAC,CAAC,GAAG,OAAO,CAAC,gBAAgB,CAAC,eAAe,IAAI,aAAa,EAAE;QAChE,CAAC,CAAC,GAAG,aAAa,EAAE,CAAC;IACzB,OAAO,mCACF,OAAO,KACV,gBAAgB,EAAE;YAChB,eAAe;SAChB,EACD,cAAc,EAAE;YACd,MAAM,EAAE,MAAA,MAAA,OAAO,CAAC,cAAc,0CAAE,MAAM,mCAAI,MAAM,CAAC,IAAI;SACtD,EACD,WAAW,EAAE;YACX,MAAM,EAAE,MAAA,MAAA,OAAO,CAAC,WAAW,0CAAE,MAAM,mCAAI,CAAC,8CAA8C,CAAC;YACvF,gBAAgB,EAAE,MAAA,MAAA,OAAO,CAAC,WAAW,0CAAE,gBAAgB,mCAAI,2BAA2B;SACvF,GACF,CAAC;IACF,MAAM,MAAM,GAAG,SAAS,CAAC,WAAW,EAAE,WAAW,EAAE,OAAO,CAAe,CAAC;IAE1E,MAAM,CAAC,QAAQ,CAAC,YAAY,CAAC,EAAE,IAAI,EAAE,kBAAkB,EAAE,CAAC,CAAC;IAE3D,MAAM,CAAC,QAAQ,CAAC,SAAS,CAAC;QACxB,IAAI,EAAE,2BAA2B;QACjC,WAAW,EAAE,CAAC,OAAO,EAAE,IAAI,EAAE,EAAE;;YAC7B,KAAK,MAAM,IAAI,IAAI,MAAA,MAAA,OAAO,CAAC,aAAa,0CAAE,KAAK,mCAAI,EAAE,EAAE,CAAC;gBACtD,MAAM,kBAAkB,GAAG,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,qBAAqB,CAAC,CAAC;gBACnE,IACE,kBAAkB;oBAClB,kBAAkB,CAAC,QAAQ,CAAC,oBAAoB,CAAC;oBACjD,CAAC,kBAAkB,CAAC,QAAQ,CAAC,WAAW,CAAC,EACzC,CAAC;oBACD,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,qBAAqB,EAAE,gDAAgD,CAAC,CAAC;gBAC5F,CAAC;YACH,CAAC;YACD,OAAO,IAAI,CAAC,OAAO,CAAC,CAAC;QACvB,CAAC;KACF,CAAC,CAAC;IAEH,OAAO,MAAM,CAAC;AAChB,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport type { ClientOptions } from \"@azure-rest/core-client\";\nimport { getClient } from \"@azure-rest/core-client\";\nimport { logger } from \"./logger.js\";\nimport type { TokenCredential, KeyCredential } 
from \"@azure/core-auth\";\nimport type { FaceClient } from \"./clientDefinitions.js\";\nimport type { Versions } from \"./models.js\";\n\n/** The optional parameters for the client */\nexport interface FaceClientOptions extends ClientOptions {\n /** API Version */\n apiVersion?: Versions;\n}\n\n/**\n * Initialize a new instance of `FaceClient`\n * @param endpointParam - Supported Cognitive Services endpoints (protocol and hostname, for example:\n * https://{resource-name}.cognitiveservices.azure.com).\n * @param credentials - uniquely identify client credential\n * @param options - the parameter for all optional parameters\n */\nexport default function createClient(\n endpointParam: string,\n credentials: TokenCredential | KeyCredential,\n { apiVersion = \"v1.2\", ...options }: FaceClientOptions = {},\n): FaceClient {\n const endpointUrl = options.endpoint ?? options.baseUrl ?? `${endpointParam}/face/${apiVersion}`;\n const userAgentInfo = `azsdk-js-ai-vision-face-rest/1.0.0-beta.3`;\n const userAgentPrefix =\n options.userAgentOptions && options.userAgentOptions.userAgentPrefix\n ? `${options.userAgentOptions.userAgentPrefix} ${userAgentInfo}`\n : `${userAgentInfo}`;\n options = {\n ...options,\n userAgentOptions: {\n userAgentPrefix,\n },\n loggingOptions: {\n logger: options.loggingOptions?.logger ?? logger.info,\n },\n credentials: {\n scopes: options.credentials?.scopes ?? [\"https://cognitiveservices.azure.com/.default\"],\n apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? \"Ocp-Apim-Subscription-Key\",\n },\n };\n const client = getClient(endpointUrl, credentials, options) as FaceClient;\n\n client.pipeline.removePolicy({ name: \"ApiVersionPolicy\" });\n\n client.pipeline.addPolicy({\n name: \"VerifyImageFilenamePolicy\",\n sendRequest: (request, next) => {\n for (const part of request.multipartBody?.parts ?? 
[]) {\n const contentDisposition = part.headers.get(\"content-disposition\");\n if (\n contentDisposition &&\n contentDisposition.includes(`name=\"VerifyImage\"`) &&\n !contentDisposition.includes(\"filename=\")\n ) {\n part.headers.set(\"content-disposition\", `form-data; name=\"VerifyImage\"; filename=\"blob\"`);\n }\n }\n return next(request);\n },\n });\n\n return client;\n}\n"]}
|