@azure-rest/ai-vision-face 1.0.0-alpha.20241216.1 → 1.0.0-alpha.20241219.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/README.md +135 -115
  2. package/package.json +2 -2
package/README.md CHANGED
@@ -77,7 +77,7 @@ A custom subdomain, on the other hand, is a name that is unique to the resource.
  #### Create the client with a Microsoft Entra ID credential

  `AzureKeyCredential` authentication is used in the examples in this getting started guide, but you can also authenticate with Microsoft Entra ID using the [@azure/identity](https://learn.microsoft.com/javascript/api/@azure/identity/?view=azure-node-latest) library.
- Note that regional endpoints do not support Microsoft Entra ID authentication. Create a [custom subdomain](https://docs.microsoft.com/azure/cognitive-services/authentication#create-a-resource-with-a-custom-subdomain) name for your resource in order to use this type of authentication.
+ Note that regional endpoints do not support Microsoft Entra ID authentication. Create a [custom subdomain](https://learn.microsoft.com/azure/cognitive-services/authentication#create-a-resource-with-a-custom-subdomain) name for your resource in order to use this type of authentication.

  To use the [DefaultAzureCredential](https://learn.microsoft.com/javascript/api/@azure/identity/defaultazurecredential?view=azure-node-latest) type shown below, or other credential types provided with the Azure SDK, please install the `@azure/identity` package:

@@ -85,7 +85,7 @@ To use the [DefaultAzureCredential](https://learn.microsoft.com/javascript/api/@
  npm install --save @azure/identity
  ```

- You will also need to [register a new Microsoft Entra ID application and grant access](https://docs.microsoft.com/azure/cognitive-services/authentication#assign-a-role-to-a-service-principal) to Face by assigning the `"Cognitive Services User"` role to your service principal.
+ You will also need to [register a new Microsoft Entra ID application and grant access](https://learn.microsoft.com/azure/cognitive-services/authentication#assign-a-role-to-a-service-principal) to Face by assigning the `"Cognitive Services User"` role to your service principal.

  Once completed, set the values of the client ID, tenant ID, and client secret of the Microsoft Entra ID application as environment variables:
  `AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`.
@@ -95,10 +95,10 @@ Once completed, set the values of the client ID, tenant ID, and client secret of
  * DefaultAzureCredential will use the values from these environment
  * variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET
  */
- import { DefaultAzureCredential } from '@azure/identity';
- import createFaceClient from '@azure-rest/ai-vision-face';
+ import { DefaultAzureCredential } from "@azure/identity";
+ import createFaceClient from "@azure-rest/ai-vision-face";

- const endpoint = process.env['FACE_ENDPOINT'] || '<endpoint>';
+ const endpoint = process.env["FACE_ENDPOINT"] || "<endpoint>";
  const credential = new DefaultAzureCredential();
  const client = createFaceClient(endpoint, credential);
  ```
@@ -114,11 +114,11 @@ az cognitiveservices account keys list --name "<resource-name>" --resource-group
  ```

  ```js
- import { AzureKeyCredential } from '@azure/core-auth';
- import createFaceClient from '@azure-rest/ai-vision-face';
+ import { AzureKeyCredential } from "@azure/core-auth";
+ import createFaceClient from "@azure-rest/ai-vision-face";

- const endpoint = process.env['FACE_ENDPOINT'] || '<endpoint>';
- const apikey = process.env['FACE_APIKEY'] || '<apikey>';
+ const endpoint = process.env["FACE_ENDPOINT"] || "<endpoint>";
+ const apikey = process.env["FACE_APIKEY"] || "<apikey>";
  const credential = new AzureKeyCredential(apikey);
  const client = createFaceClient(endpoint, credential);
  ```
@@ -163,33 +163,31 @@ The following section provides several code snippets covering some of the most c
  Detect faces and analyze them from binary data.

  ```js
- import { readFileSync } from 'fs';
- import { AzureKeyCredential } from '@azure/core-auth';
+ import { readFileSync } from "fs";
+ import { AzureKeyCredential } from "@azure/core-auth";

- import createFaceClient, {
-   isUnexpected,
- } from '@azure-rest/ai-vision-face';
+ import createFaceClient, { isUnexpected } from "@azure-rest/ai-vision-face";

- const endpoint = process.env['FACE_ENDPOINT'] || '<endpoint>';
- const apikey = process.env['FACE_APIKEY'] || '<apikey>';
+ const endpoint = process.env["FACE_ENDPOINT"] || "<endpoint>";
+ const apikey = process.env["FACE_APIKEY"] || "<apikey>";
  const credential = new AzureKeyCredential(apikey);
  const client = createFaceClient(endpoint, credential);

- const response = await client.path('/detect').post({
-   contentType: 'application/octet-stream',
-   queryParameters: {
-     detectionModel: 'detection_03',
-     recognitionModel: 'recognition_04',
-     returnFaceLandmarks: true,
-     returnRecognitionModel: true,
-     faceIdTimeToLive: 120,
-     returnFaceAttributes: ['headPose', 'mask', 'qualityForRecognition'],
-     returnFaceId: false,
-   },
-   body: readFileSync('path/to/test/image'),
+ const response = await client.path("/detect").post({
+   contentType: "application/octet-stream",
+   queryParameters: {
+     detectionModel: "detection_03",
+     recognitionModel: "recognition_04",
+     returnFaceLandmarks: true,
+     returnRecognitionModel: true,
+     faceIdTimeToLive: 120,
+     returnFaceAttributes: ["headPose", "mask", "qualityForRecognition"],
+     returnFaceId: false,
+   },
+   body: readFileSync("path/to/test/image"),
  });
  if (isUnexpected(response)) {
-   throw new Error(response.body.error.message);
+   throw new Error(response.body.error.message);
  }
  console.log(response.body);
  ```
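For context (this part of the README is unchanged and so absent from the diff), the successful `/detect` body is an array of per-face results. A minimal sketch of consuming it, with field names assumed from the Face REST API surface:

```js
// Hedged sketch: iterate the detected faces. faceRectangle is always present;
// faceAttributes only carries the attributes requested via returnFaceAttributes.
for (const face of response.body) {
  const { top, left, width, height } = face.faceRectangle;
  console.log(`Face at (${left}, ${top}), ${width}x${height}`);
  console.log(`Quality for recognition: ${face.faceAttributes?.qualityForRecognition}`);
}
```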
@@ -201,75 +199,92 @@ Identify a face against a defined LargePersonGroup.
  First, we have to create a LargePersonGroup, add a few Persons to it, and then register faces with these Persons.

  ```js
- import { readFileSync } from 'fs';
- import { AzureKeyCredential } from '@azure/core-auth';
+ import { readFileSync } from "fs";
+ import { AzureKeyCredential } from "@azure/core-auth";

- import createFaceClient, {
-   getLongRunningPoller,
-   isUnexpected,
- } from '@azure-rest/ai-vision-face';
+ import createFaceClient, { getLongRunningPoller, isUnexpected } from "@azure-rest/ai-vision-face";

- const endpoint = process.env['FACE_ENDPOINT'] || '<endpoint>';
- const apikey = process.env['FACE_APIKEY'] || '<apikey>';
+ const endpoint = process.env["FACE_ENDPOINT"] || "<endpoint>";
+ const apikey = process.env["FACE_APIKEY"] || "<apikey>";
  const credential = new AzureKeyCredential(apikey);
  const client = createFaceClient(endpoint, credential);

- const largePersonGroupId = 'lpg_family';
+ const largePersonGroupId = "lpg_family";

  console.log(`Create a large person group with id: ${largePersonGroupId}`);
- const createLargePersonGroupResponse = await client.path('/largepersongroups/{largePersonGroupId}', largePersonGroupId).put({
+ const createLargePersonGroupResponse = await client
+   .path("/largepersongroups/{largePersonGroupId}", largePersonGroupId)
+   .put({
    body: {
-     name: 'My Family',
-     recognitionModel: 'recognition_04',
+     name: "My Family",
+     recognitionModel: "recognition_04",
    },
- });
+   });

- console.log('Create a Person Bill and add a face to him.');
- const createLargePersonGroupPersonResponse_bill = await client.path('/largepersongroups/{largePersonGroupId}/persons', largePersonGroupId).post({
+ console.log("Create a Person Bill and add a face to him.");
+ const createLargePersonGroupPersonResponse_bill = await client
+   .path("/largepersongroups/{largePersonGroupId}/persons", largePersonGroupId)
+   .post({
    body: {
-     name: 'Bill',
-     userData: 'Dad',
+     name: "Bill",
+     userData: "Dad",
    },
- });
+   });
  if (isUnexpected(createLargePersonGroupPersonResponse_bill)) {
-   throw new Error(createLargePersonGroupPersonResponse_bill.body.error.message);
+   throw new Error(createLargePersonGroupPersonResponse_bill.body.error.message);
  }
  const personId_bill = createLargePersonGroupPersonResponse_bill.body.personId;
- await client.path('/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces', largePersonGroupId, personId_bill).post({
+ await client
+   .path(
+     "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces",
+     largePersonGroupId,
+     personId_bill,
+   )
+   .post({
    queryParameters: {
-     userData: 'Dad-0001',
-     detectionModel: 'detection_03',
+     userData: "Dad-0001",
+     detectionModel: "detection_03",
    },
-   contentType: 'application/octet-stream',
-   body: readFileSync('path/to/bill/image'),
- });
-
- console.log('Create a Person Clare and add a face to her.');
- const createLargePersonGroupPersonResponse_clare = await client.path('/largepersongroups/{largePersonGroupId}/persons', largePersonGroupId).post({
+   contentType: "application/octet-stream",
+   body: readFileSync("path/to/bill/image"),
+   });
+
+ console.log("Create a Person Clare and add a face to her.");
+ const createLargePersonGroupPersonResponse_clare = await client
+   .path("/largepersongroups/{largePersonGroupId}/persons", largePersonGroupId)
+   .post({
    body: {
-     name: 'Clare',
-     userData: 'Mom',
+     name: "Clare",
+     userData: "Mom",
    },
- });
+   });
  if (isUnexpected(createLargePersonGroupPersonResponse_clare)) {
-   throw new Error(createLargePersonGroupPersonResponse_clare.body.error.message);
+   throw new Error(createLargePersonGroupPersonResponse_clare.body.error.message);
  }
  const personId_clare = createLargePersonGroupPersonResponse_clare.body.personId;
- await client.path('/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces', largePersonGroupId, personId_clare).post({
+ await client
+   .path(
+     "/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces",
+     largePersonGroupId,
+     personId_clare,
+   )
+   .post({
    queryParameters: {
-     userData: 'Mom-0001',
-     detectionModel: 'detection_03',
+     userData: "Mom-0001",
+     detectionModel: "detection_03",
    },
-   contentType: 'application/octet-stream',
-   body: readFileSync('path/to/clare/image'),
- });
+   contentType: "application/octet-stream",
+   body: readFileSync("path/to/clare/image"),
+   });
  ```

  Before doing the identification, we must train the LargePersonGroup first.

  ```js
  console.log(`Start to train the large person group: ${largePersonGroupId}`);
- const trainResponse = await client.path('/largepersongroups/{largePersonGroupId}/train', largePersonGroupId).post();
+ const trainResponse = await client
+   .path("/largepersongroups/{largePersonGroupId}/train", largePersonGroupId)
+   .post();
  const trainPoller = await getLongRunningPoller(client, trainResponse);
  await trainPoller.pollUntilDone();
  // Check if poller.getOperationState().status is 'succeeded'.
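The identify call itself is unchanged in this release and therefore falls outside the hunks above. A minimal sketch of it, assuming a `faceId` previously obtained from `/detect` with `returnFaceId: true`:

```js
// Hedged sketch: identify a detected face against the trained LargePersonGroup.
const identifyResponse = await client.path("/identify").post({
  body: {
    faceIds: [faceId], // a faceId returned by /detect (requires returnFaceId: true)
    largePersonGroupId,
  },
});
if (isUnexpected(identifyResponse)) {
  throw new Error(identifyResponse.body.error.message);
}
// Each result carries candidates with personId and confidence.
console.log(identifyResponse.body);
```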
@@ -307,10 +322,11 @@ Finally, remove the large person group if you don't need it anymore.

  ```js
  console.log(`Delete the large person group: ${largePersonGroupId}`);
- await client.path('/largepersongroups/{largePersonGroupId}', largePersonGroupId).delete();
+ await client.path("/largepersongroups/{largePersonGroupId}", largePersonGroupId).delete();
  ```

  ### Liveness detection
+
  Face Liveness detection can be used to determine if a face in an input video stream is real (live) or fake (spoof).
  The goal of liveness detection is to ensure that the system is interacting with a physically present live person at
  the time of authentication. The whole process of authentication is called a session.
@@ -330,39 +346,41 @@ integrate the UI and the code into your native frontend application, please foll
  Here is an example to create and get the liveness detection result of a session.

  ```js
- import { randomUUID } from 'crypto';
+ import { randomUUID } from "crypto";

- import { AzureKeyCredential } from '@azure/core-auth';
+ import { AzureKeyCredential } from "@azure/core-auth";

- import createFaceClient, {
-   isUnexpected,
- } from '@azure-rest/ai-vision-face';
+ import createFaceClient, { isUnexpected } from "@azure-rest/ai-vision-face";

- const endpoint = process.env['FACE_ENDPOINT'] || '<endpoint>';
- const apikey = process.env['FACE_APIKEY'] || '<apikey>';
+ const endpoint = process.env["FACE_ENDPOINT"] || "<endpoint>";
+ const apikey = process.env["FACE_APIKEY"] || "<apikey>";
  const credential = new AzureKeyCredential(apikey);
  const client = createFaceClient(endpoint, credential);

- console.log('Create a new liveness session.');
- const createLivenessSessionResponse = await client.path('/detectLiveness/singleModal/sessions').post({
+ console.log("Create a new liveness session.");
+ const createLivenessSessionResponse = await client
+   .path("/detectLiveness/singleModal/sessions")
+   .post({
    body: {
-     livenessOperationMode: 'Passive',
-     deviceCorrelationId: randomUUID(),
-     sendResultsToClient: false,
-     authTokenTimeToLiveInSeconds: 60,
+     livenessOperationMode: "Passive",
+     deviceCorrelationId: randomUUID(),
+     sendResultsToClient: false,
+     authTokenTimeToLiveInSeconds: 60,
    },
- });
+   });
  if (isUnexpected(createLivenessSessionResponse)) {
-   throw new Error(createLivenessSessionResponse.body.error.message);
+   throw new Error(createLivenessSessionResponse.body.error.message);
  }
  console.log(createLivenessSessionResponse.body);

  const { sessionId } = createLivenessSessionResponse.body;

- console.log('Get liveness detection results.');
- const getLivenessSessionResponse = await client.path('/detectLiveness/singleModal/sessions/{sessionId}', sessionId).get();
+ console.log("Get liveness detection results.");
+ const getLivenessSessionResponse = await client
+   .path("/detectLiveness/singleModal/sessions/{sessionId}", sessionId)
+   .get();
  if (isUnexpected(getLivenessSessionResponse)) {
-   throw new Error(getLivenessSessionResponse.body.error.message);
+   throw new Error(getLivenessSessionResponse.body.error.message);
  }
  console.log(getLivenessSessionResponse.body);
  ```
@@ -370,50 +388,52 @@ console.log(getLivenessSessionResponse.body);
  Here is another example for the liveness detection with face verification.

  ```js
- import { randomUUID } from 'crypto';
- import { readFileSync } from 'fs';
+ import { randomUUID } from "crypto";
+ import { readFileSync } from "fs";

- import { AzureKeyCredential } from '@azure/core-auth';
+ import { AzureKeyCredential } from "@azure/core-auth";

- import createFaceClient, {
-   isUnexpected,
- } from '@azure-rest/ai-vision-face';
+ import createFaceClient, { isUnexpected } from "@azure-rest/ai-vision-face";

- const endpoint = process.env['FACE_ENDPOINT'] || '<endpoint>';
- const apikey = process.env['FACE_APIKEY'] || '<apikey>';
+ const endpoint = process.env["FACE_ENDPOINT"] || "<endpoint>";
+ const apikey = process.env["FACE_APIKEY"] || "<apikey>";
  const credential = new AzureKeyCredential(apikey);
  const client = createFaceClient(endpoint, credential);

- console.log('Create a new liveness with verify session with verify image.');
- const createLivenessSessionResponse = await client.path('/detectLivenessWithVerify/singleModal/sessions').post({
-   contentType: 'multipart/form-data',
+ console.log("Create a new liveness with verify session with verify image.");
+ const createLivenessSessionResponse = await client
+   .path("/detectLivenessWithVerify/singleModal/sessions")
+   .post({
+     contentType: "multipart/form-data",
    body: [
-     {
-       name: 'VerifyImage',
-       body: readFileSync('path/to/verify/image'),
-     },
-     {
-       name: 'Parameters',
-       body: {
-         livenessOperationMode: 'Passive',
-         sendResultsToClient: false,
-         authTokenTimeToLiveInSeconds: 60,
-         deviceCorrelationId: randomUUID(),
-       },
+       {
+         name: "VerifyImage",
+         body: readFileSync("path/to/verify/image"),
+       },
+       {
+         name: "Parameters",
+         body: {
+           livenessOperationMode: "Passive",
+           sendResultsToClient: false,
+           authTokenTimeToLiveInSeconds: 60,
+           deviceCorrelationId: randomUUID(),
        },
+     },
    ],
- });
+   });
  if (isUnexpected(createLivenessSessionResponse)) {
-   throw new Error(createLivenessSessionResponse.body.error.message);
+   throw new Error(createLivenessSessionResponse.body.error.message);
  }
  console.log(createLivenessSessionResponse.body);

  const { sessionId } = createLivenessSessionResponse.body;

- console.log('Get the liveness detection and verification result.');
- const getLivenessSessionResultResponse = await client.path('/detectLivenessWithVerify/singleModal/sessions/{sessionId}', sessionId).get();
+ console.log("Get the liveness detection and verification result.");
+ const getLivenessSessionResultResponse = await client
+   .path("/detectLivenessWithVerify/singleModal/sessions/{sessionId}", sessionId)
+   .get();
  if (isUnexpected(getLivenessSessionResultResponse)) {
-   throw new Error(getLivenessSessionResultResponse.body.error.message);
+   throw new Error(getLivenessSessionResultResponse.body.error.message);
  }
  console.log(getLivenessSessionResultResponse.body);
  ```
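Not covered by this diff: once the result has been retrieved, the session can be removed. A minimal sketch, assuming the session DELETE operation on the same path:

```js
// Hedged sketch: clean up the liveness-with-verify session after reading results.
await client
  .path("/detectLivenessWithVerify/singleModal/sessions/{sessionId}", sessionId)
  .delete();
```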
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@azure-rest/ai-vision-face",
-   "version": "1.0.0-alpha.20241216.1",
+   "version": "1.0.0-alpha.20241219.1",
    "description": "Face API REST Client",
    "engines": {
      "node": ">=18.0.0"
@@ -70,7 +70,7 @@
  "@types/node": "^18.0.0",
  "eslint": "^9.9.0",
  "prettier": "^3.2.5",
- "typescript": "~5.6.3",
+ "typescript": "~5.7.2",
  "tshy": "^1.11.1",
  "@azure/identity": "^4.2.1",
  "@vitest/browser": "^2.0.5",