@azure/ai-form-recognizer 4.0.0-beta.4 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/README.md +206 -184
  2. package/dist/index.js +588 -1992
  3. package/dist/index.js.map +1 -1
  4. package/dist-esm/src/constants.js +1 -1
  5. package/dist-esm/src/constants.js.map +1 -1
  6. package/dist-esm/src/documentAnalysisClient.js +30 -227
  7. package/dist-esm/src/documentAnalysisClient.js.map +1 -1
  8. package/dist-esm/src/documentModel.js +71 -0
  9. package/dist-esm/src/documentModel.js.map +1 -0
  10. package/dist-esm/src/documentModelAdministrationClient.js +59 -59
  11. package/dist-esm/src/documentModelAdministrationClient.js.map +1 -1
  12. package/dist-esm/src/generated/generatedClient.js +63 -101
  13. package/dist-esm/src/generated/generatedClient.js.map +1 -1
  14. package/dist-esm/src/generated/models/index.js +40 -9
  15. package/dist-esm/src/generated/models/index.js.map +1 -1
  16. package/dist-esm/src/generated/models/mappers.js +207 -205
  17. package/dist-esm/src/generated/models/mappers.js.map +1 -1
  18. package/dist-esm/src/generated/models/parameters.js +4 -38
  19. package/dist-esm/src/generated/models/parameters.js.map +1 -1
  20. package/dist-esm/src/index.js +1 -1
  21. package/dist-esm/src/index.js.map +1 -1
  22. package/dist-esm/src/lro/{training.js → administration.js} +4 -4
  23. package/dist-esm/src/lro/administration.js.map +1 -0
  24. package/dist-esm/src/lro/{analyze.js → analysis.js} +12 -11
  25. package/dist-esm/src/lro/analysis.js.map +1 -0
  26. package/dist-esm/src/models/documentElements.js.map +1 -1
  27. package/dist-esm/src/models/index.js.map +1 -1
  28. package/dist-esm/src/options/AnalyzeDocumentsOptions.js.map +1 -1
  29. package/dist-esm/src/options/{CopyModelOptions.js → BeginCopyModelOptions.js} +1 -1
  30. package/dist-esm/src/options/BeginCopyModelOptions.js.map +1 -0
  31. package/dist-esm/src/options/BuildModelOptions.js.map +1 -1
  32. package/dist-esm/src/options/DeleteModelOptions.js.map +1 -1
  33. package/dist-esm/src/options/FormRecognizerClientOptions.js +3 -2
  34. package/dist-esm/src/options/FormRecognizerClientOptions.js.map +1 -1
  35. package/dist-esm/src/options/{GetInfoOptions.js → GetResourceDetailsOptions.js} +1 -1
  36. package/dist-esm/src/options/GetResourceDetailsOptions.js.map +1 -0
  37. package/dist-esm/src/options/index.js.map +1 -1
  38. package/dist-esm/src/util.js.map +1 -1
  39. package/package.json +16 -10
  40. package/types/ai-form-recognizer.d.ts +388 -1753
  41. package/dist-esm/src/generated/generatedClientContext.js +0 -41
  42. package/dist-esm/src/generated/generatedClientContext.js.map +0 -1
  43. package/dist-esm/src/lro/analyze.js.map +0 -1
  44. package/dist-esm/src/lro/training.js.map +0 -1
  45. package/dist-esm/src/models/GeneralDocumentResult.js +0 -13
  46. package/dist-esm/src/models/GeneralDocumentResult.js.map +0 -1
  47. package/dist-esm/src/models/LayoutResult.js +0 -15
  48. package/dist-esm/src/models/LayoutResult.js.map +0 -1
  49. package/dist-esm/src/models/ReadResult.js +0 -18
  50. package/dist-esm/src/models/ReadResult.js.map +0 -1
  51. package/dist-esm/src/options/CopyModelOptions.js.map +0 -1
  52. package/dist-esm/src/options/GetInfoOptions.js.map +0 -1
  53. package/dist-esm/src/prebuilt/index.js +0 -9
  54. package/dist-esm/src/prebuilt/index.js.map +0 -1
  55. package/dist-esm/src/prebuilt/modelSchemas/businessCard.js +0 -5
  56. package/dist-esm/src/prebuilt/modelSchemas/businessCard.js.map +0 -1
  57. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-businessCard.json.js +0 -93
  58. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-businessCard.json.js.map +0 -1
  59. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-document.json.js +0 -12
  60. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-document.json.js.map +0 -1
  61. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-healthInsuranceCard.us.json.js +0 -153
  62. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-healthInsuranceCard.us.json.js.map +0 -1
  63. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-idDocument.json.js +0 -89
  64. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-idDocument.json.js.map +0 -1
  65. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-invoice.json.js +0 -144
  66. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-invoice.json.js.map +0 -1
  67. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-layout.json.js +0 -12
  68. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-layout.json.js.map +0 -1
  69. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-read.json.js +0 -12
  70. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-read.json.js.map +0 -1
  71. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-receipt.json.js +0 -341
  72. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-receipt.json.js.map +0 -1
  73. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-tax.us.w2.json.js +0 -155
  74. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-tax.us.w2.json.js.map +0 -1
  75. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-vaccinationCard.json.js +0 -50
  76. package/dist-esm/src/prebuilt/modelSchemas/data/prebuilt-vaccinationCard.json.js.map +0 -1
  77. package/dist-esm/src/prebuilt/modelSchemas/healthInsuranceCard.js +0 -5
  78. package/dist-esm/src/prebuilt/modelSchemas/healthInsuranceCard.js.map +0 -1
  79. package/dist-esm/src/prebuilt/modelSchemas/idDocument.js +0 -5
  80. package/dist-esm/src/prebuilt/modelSchemas/idDocument.js.map +0 -1
  81. package/dist-esm/src/prebuilt/modelSchemas/invoice.js +0 -5
  82. package/dist-esm/src/prebuilt/modelSchemas/invoice.js.map +0 -1
  83. package/dist-esm/src/prebuilt/modelSchemas/receipt.js +0 -5
  84. package/dist-esm/src/prebuilt/modelSchemas/receipt.js.map +0 -1
  85. package/dist-esm/src/prebuilt/modelSchemas/vaccinationCard.js +0 -5
  86. package/dist-esm/src/prebuilt/modelSchemas/vaccinationCard.js.map +0 -1
  87. package/dist-esm/src/prebuilt/modelSchemas/w2.js +0 -5
  88. package/dist-esm/src/prebuilt/modelSchemas/w2.js.map +0 -1
  89. package/dist-esm/src/prebuilt/models.js +0 -150
  90. package/dist-esm/src/prebuilt/models.js.map +0 -1
  91. package/dist-esm/src/prebuilt/schema.js +0 -4
  92. package/dist-esm/src/prebuilt/schema.js.map +0 -1
package/README.md CHANGED
@@ -14,19 +14,19 @@ Azure Cognitive Services [Form Recognizer](https://azure.microsoft.com/services/
14
14
  [Product documentation](https://docs.microsoft.com/azure/cognitive-services/form-recognizer/) |
15
15
  [Samples](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/formrecognizer/ai-form-recognizer/samples)
16
16
 
17
- #### **_Breaking Change Advisory_ ⚠️**
17
+ #### **_Breaking change advisory_ ⚠️**
18
18
 
19
- In version 4 (currently beta), this package introduces a full redesign of the Azure Form Recognizer client library. To leverage features of the newest Form Recognizer service API (version "2022-06-30-preview" and newer), the new SDK is required, and application code must be changed to use the new clients. Please see the [Migration Guide](https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/MIGRATION-v3_v4.md) for detailed instructions on how to update application code from version 3.x of the Form Recognizer SDK to the new version (4.x). Additionally, the [CHANGELOG](https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/CHANGELOG.md) contains an outline of the changes. This package targets Azure Form Recognizer service API version `2022-06-30-preview` and newer. To continue to use Form Recognizer API version 2.1, please use major version 3 of the client package (`@azure/ai-form-recognizer@^3.2.0`).
19
+ In version 4.0.0, this package introduces a full redesign of the Azure Form Recognizer client library. To leverage features of the newest Form Recognizer service API (version "2022-08-31" and newer), the new SDK is required, and application code must be changed to use the new clients. Please see the [Migration Guide](https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/MIGRATION-v3_v4.md) for detailed instructions on how to update application code from version 3.x of the Form Recognizer SDK to the new version (4.x). Additionally, the [CHANGELOG](https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/CHANGELOG.md) contains an outline of the changes since version 3.x. This package targets Azure Form Recognizer service API version `2022-08-31` and newer. To continue to use Form Recognizer API version 2.1, please use major version 3 of the client package (`@azure/ai-form-recognizer@^3.2.0`).
20
20
 
21
- ### Install the `@azure/ai-form-recognizer` Package
21
+ ### Install the `@azure/ai-form-recognizer` package
22
22
 
23
23
  Install the Azure Form Recognizer client library for JavaScript with `npm`:
24
24
 
25
25
  ```bash
26
- npm install @azure/ai-form-recognizer@4.0.0-beta.3
26
+ npm install @azure/ai-form-recognizer
27
27
  ```
28
28
 
29
- ## Getting Started
29
+ ## Getting started
30
30
 
31
31
  ```javascript
32
32
  const { DocumentAnalysisClient } = require("@azure/ai-form-recognizer");
@@ -47,7 +47,7 @@ const poller = await client.beginAnalyzeDocument("<model ID>", file);
47
47
  const { pages, tables, styles, keyValuePairs, entities, documents } = await poller.pollUntilDone();
48
48
  ```
49
49
 
50
- ### Currently Supported Environments
50
+ ### Currently supported environments
51
51
 
52
52
  - [LTS versions of Node.js](https://nodejs.org/about/releases/)
53
53
  - Latest versions of Safari, Chrome, Edge, and Firefox.
@@ -59,7 +59,7 @@ See our [support policy](https://github.com/Azure/azure-sdk-for-js/blob/main/SUP
59
59
  - An [Azure subscription](https://azure.microsoft.com/free/)
60
60
  - A [Cognitive Services or Form Recognizer resource][fr_or_cs_resource]. If you need to create the resource, you can use the [Azure Portal][azure_portal] or [Azure CLI][azure_cli].
61
61
 
62
- #### Create a Form Recognizer Resource
62
+ #### Create a Form Recognizer resource
63
63
 
64
64
  Form Recognizer supports both [multi-service and single-service access][multi_and_single_service]. Create a Cognitive Services resource if you plan to access multiple cognitive services under a single endpoint/key. For Form Recognizer access only, create a Form Recognizer resource.
65
65
 
@@ -83,7 +83,7 @@ If you use the Azure CLI, replace `<your-resource-group-name>` and `<your-resour
83
83
  az cognitiveservices account create --kind FormRecognizer --resource-group <your-resource-group-name> --name <your-resource-name> --sku <your-sku-name> --location <your-location>
84
84
  ```
85
85
 
86
- ### Create and Authenticate a Client
86
+ ### Create and authenticate a client
87
87
 
88
88
  In order to interact with the Form Recognizer service, you'll need to select either a `DocumentAnalysisClient` or a `DocumentModelAdministrationClient`, and create an instance of this type. In the following examples, we will use `DocumentAnalysisClient`. To create a client instance to access the Form Recognizer API, you will need the `endpoint` of your Form Recognizer resource and a `credential`. The Form Recognizer clients can use either an `AzureKeyCredential` with an API key of your resource or a `TokenCredential` that uses Azure Active Directory RBAC to authorize the client.
89
89
 
@@ -93,7 +93,7 @@ You can find the endpoint for your Form Recognizer resource either in the [Azure
93
93
  az cognitiveservices account show --name <your-resource-name> --resource-group <your-resource-group-name> --query "properties.endpoint"
94
94
  ```
95
95
 
96
- #### Using an API Key
96
+ #### Use an API key
97
97
 
98
98
  Use the [Azure Portal][azure_portal] to browse to your Form Recognizer resource and retrieve an API key, or use the [Azure CLI][azure_cli] snippet below:
99
99
 
@@ -111,7 +111,7 @@ const { DocumentAnalysisClient, AzureKeyCredential } = require("@azure/ai-form-r
111
111
  const client = new DocumentAnalysisClient("<endpoint>", new AzureKeyCredential("<API key>"));
112
112
  ```
113
113
 
114
- #### Using Azure Active Directory
114
+ #### Use Azure Active Directory
115
115
 
116
116
  API key authorization is used in most of the examples, but you can also authenticate the client with Azure Active Directory using the [Azure Identity library][azure_identity]. To use the [DefaultAzureCredential][defaultazurecredential] provider shown below or other credential providers provided with the Azure SDK, please install the `@azure/identity` package:
117
117
 
@@ -130,33 +130,31 @@ const { DefaultAzureCredential } = require("@azure/identity");
130
130
  const client = new DocumentAnalysisClient("<endpoint>", new DefaultAzureCredential());
131
131
  ```
132
132
 
133
- ## Key Concepts
133
+ ## Key concepts
134
134
 
135
135
  ### `DocumentAnalysisClient`
136
136
 
137
137
  `DocumentAnalysisClient` provides operations for analyzing input documents using custom and prebuilt models. It has three methods:
138
138
 
139
- - `beginAnalyzeDocument`, which extracts data from an input document using a custom or prebuilt model given by its model ID. For information about the prebuilt models supported in all resources and their model IDs/outputs, please see [the service's documentation of the models][fr-models].
140
- - `beginExtractLayout`, which uses the "prebuilt-layout" model to extract only the basic layout (OCR) information from the input documents, such as pages and their contents, tables, and observed text styles. This method provides a stronger TypeScript type for the layout result than the `beginAnalyzeDocument` method.
141
- - `beginExtractGeneralDocument`, which uses the "prebuilt-document" model to extract key-value pairs and entities in addition to the properties of the prebuilt layout model. This method also provides a stronger TypeScript type for the general document result than the `beginAnalyzeDocument` method.
142
- - `beginReadDocument`, which uses the "prebuilt-read" model to extract textual elements, such as page words and lines in addition to text language information.
139
+ - `beginAnalyzeDocument`, which extracts data from an input document file stream using a custom or prebuilt model given by its model ID. For information about the prebuilt models supported in all resources and their model IDs/outputs, please see [the service's documentation of the models][fr-models].
140
+ - `beginAnalyzeDocumentFromUrl`, which performs the same function as `beginAnalyzeDocument`, but submits a publicly-accessible URL of a file instead of a file stream.
143
141
 
144
142
  ### `DocumentModelAdministrationClient`
145
143
 
146
144
  `DocumentModelAdministrationClient` provides operations for managing (creating, reading, listing, and deleting) models in the Form Recognizer resource:
147
145
 
148
- - `beginBuildModel` starts an operation to create a new document model from your own training data set. The created model can extract fields according to a custom schema. The training data are expected to be located in an Azure Storage container and organized according to a particular convention. See the [service's documentation on creating a training data set][fr-build-training-set] for a more detailed explanation of applying labels to a training data set.
149
- - `beginComposeModel` starts an operation to compose multiple models into a single model. When used for custom form recognition, the new composed model will first perform a classification of the input documents to determine which of its submodels is most appropriate.
146
+ - `beginBuildDocumentModel` starts an operation to create a new document model from your own training data set. The created model can extract fields according to a custom schema. The training data are expected to be located in an Azure Storage container and organized according to a particular convention. See the [service's documentation on creating a training data set][fr-build-training-set] for a more detailed explanation of applying labels to a training data set.
147
+ - `beginComposeDocumentModel` starts an operation to compose multiple models into a single model. When used for custom form recognition, the new composed model will first perform a classification of the input documents to determine which of its submodels is most appropriate.
150
148
  - `beginCopyModelTo` starts an operation to copy a custom model from one Form Recognizer resource to another (or even to the same Form Recognizer resource). It requires a `CopyAuthorization` from the target Form Recognizer resource, which can be generated using the `getCopyAuthorization` method.
151
- - `getInfo` retrieves information about the resource's limits, such as the number of custom models and the maximum number of models the resource can support.
152
- - `getModel`, `listModels`, and `deleteModel` enable managing models in the resource.
149
+ - `getResourceDetails` retrieves information about the Form Recognizer resource's limits, such as the number of custom models and the maximum number of models the resource can support.
150
+ - `getDocumentModel`, `listDocumentModels`, and `deleteDocumentModel` enable managing models in the resource.
153
151
  - `getOperation` and `listOperations` enable viewing the status of model creation operations, even those operations that are ongoing or that have failed. Operations are retained for 24 hours.
154
152
 
155
- Please note that models can also be created using the Form Recognizer service's graphical user interface: [Form Recognizer Labeling Tool][fr-labeling-tool].
153
+ Please note that models can also be created using the Form Recognizer service's graphical user interface: [Form Recognizer Studio (Preview)][fr-studio].
156
154
 
157
155
  Sample code snippets that illustrate the use of `DocumentModelAdministrationClient` to build a model can be found [below, in the "Build a model" example section](#build-a-model).
158
156
 
159
- ### Long-Running Operations
157
+ ### Long-running operations
160
158
 
161
159
  Long-running operations (LROs) are operations which consist of an initial request sent to the service to start an operation, followed by polling for a result at a certain interval to determine if the operation has completed and whether it failed or succeeded. Ultimately, the LRO will either fail with an error or produce a result.
162
160
 
@@ -166,16 +164,17 @@ In Azure Form Recognizer, operations that create models (including copying and c
166
164
 
167
165
  The following section provides several JavaScript code snippets illustrating common patterns used in the Form Recognizer client libraries.
168
166
 
169
- - [Analyze a Document with a Model](#analyze-a-document-with-a-model)
170
- - [Extract Layout](#extract-layout)
171
- - [Extract General Document](#extract-general-document)
172
- - [Use Prebuilt Models](#using-prebuilt-models)
173
- - [Build a Model](#build-a-model)
174
- - [Manage Models](#manage-models)
167
+ - [Analyze a document with a model ID](#analyze-a-document-with-a-model-id)
168
+ - [Use prebuilt document models](#use-prebuilt-document-models)
169
+ - [Use the "layout" prebuilt](#use-the-layout-prebuilt)
170
+ - [Use the "document" prebuilt](#use-the-document-prebuilt)
171
+ - [Use the "read" prebuilt](#use-the-read-prebuilt)
172
+ - [Build a model](#build-a-model)
173
+ - [Manage models](#manage-models)
175
174
 
176
- ### Analyze a Document with a Model
175
+ ### Analyze a document with a model ID
177
176
 
178
- The `beginAnalyzeDocument` method can extract fields and table data from documents. These models are trained with your own data, so they're tailored to your particular documents. A custom model should only be used with documents of the same structure as one of the document types in the model (there may be multiple, such as in a composed model).
177
+ The `beginAnalyzeDocument` method can extract fields and table data from documents. Analysis may use either a custom model, trained with your own data, or a prebuilt model provided by the service (see _[Use prebuilt document models](#use-prebuilt-document-models)_ below). A custom model is tailored to your own documents, so it should only be used with documents of the same structure as one of the document types in the model (there may be multiple, such as in a composed model).
179
178
 
180
179
  ```javascript
181
180
  const { DocumentAnalysisClient, AzureKeyCredential } = require("@azure/ai-form-recognizer");
@@ -201,7 +200,7 @@ async function main() {
201
200
  const { documents, pages, tables } = await poller.pollUntilDone();
202
201
 
203
202
  console.log("Documents:");
204
- for (const document of documents) {
203
+ for (const document of documents || []) {
205
204
  console.log(`Type: ${document.docType}`);
206
205
  console.log("Fields:");
207
206
  for (const [name, field] of Object.entries(document.fields)) {
@@ -211,12 +210,12 @@ async function main() {
211
210
  }
212
211
  }
213
212
  console.log("Pages:");
214
- for (const page of pages) {
213
+ for (const page of pages || []) {
215
214
  console.log(`Page number: ${page.pageNumber} (${page.width}x${page.height} ${page.unit})`);
216
215
  }
217
216
 
218
217
  console.log("Tables:");
219
- for (const table of tables) {
218
+ for (const table of tables || []) {
220
219
  console.log(`- Table (${table.columnCount}x${table.rowCount})`);
221
220
  for (const cell of table.cells) {
222
221
  console.log(` - cell (${cell.rowIndex},${cell.columnIndex}) "${cell.content}"`);
@@ -229,19 +228,115 @@ main().catch((err) => {
229
228
  });
230
229
  ```
231
230
 
232
- As an alternative to providing a readable stream, a publicly-accessible URL can be provided instead. "Publicly-accessible" means that URL sources must be accessible from the service's infrastructure (in other words, a private intranet URL, or URLs that use header- or certificate-based secrets, will not work, as the Form Recognizer service must be able to access the URL). However, the URL itself could encode a secret, such as an Azure Storage blob URL that contains a SAS token in the query parameters.
231
+ #### Analyze a document from a URL
233
232
 
234
- ### Extract Layout
233
+ As an alternative to providing a readable stream, a publicly-accessible URL can be provided instead using the `beginAnalyzeDocumentFromUrl` method. "Publicly-accessible" means that URL sources must be accessible from the service's infrastructure (in other words, a private intranet URL, or URLs that use header- or certificate-based secrets, will not work, as the Form Recognizer service must be able to access the URL). However, the URL itself could encode a secret, such as an Azure Storage blob URL that contains a SAS token in the query parameters.
235
234
 
236
- <a id="beginextractlayout-deprecation"></a>
237
- **Deprecation Warning** ⚠️: The built in `beginExtractLayout` method is deprecated as of version `4.0.0-beta.4`. Prior to a stable release of version 4.0.0, we will remove it and provide a solution that uses the `beginAnalyzeDocument` method instead. This will align the `"prebuilt-layout"` model with the other prebuilt models, enabling us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
235
+ ### Use prebuilt document models
238
236
 
239
- The `beginExtractLayout` method extracts only the basic elements of the document, such as pages, (which consist of text words/lines and selection marks), tables, and visual text styles along with their bounding regions and spans within the text content of the input documents.
237
+ The `beginAnalyzeDocument` method also supports extracting fields from certain types of common documents such as receipts, invoices, business cards, identity documents, and more using prebuilt models provided by the Form Recognizer service. The prebuilt models may be provided either as model ID strings (the same as custom document models&mdash;see the _[other prebuilt models](#other-prebuilt-models)_ section below) or using a `DocumentModel` object. When using a `DocumentModel`, the Form Recognizer SDK for JavaScript provides a much stronger TypeScript type for the resulting extracted documents based on the model's schema, and it will be converted to use JavaScript naming conventions.
240
238
 
241
- ```javascript
242
- const { DocumentAnalysisClient, AzureKeyCredential } = require("@azure/ai-form-recognizer");
239
+ <a id="prebuiltmodels-removed"></a>
240
+ **Breaking Change Warning** ⚠️: In previous `4.0.0-beta` versions of the Azure Form Recognizer SDK for JavaScript, prebuilt `DocumentModel` objects were exported from the package through an object named `PrebuiltModels`. This object has been removed and replaced with the [`DocumentModel` samples][samples-prebuilt], which you may use as part of your own project. This change will enable us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
243
241
 
244
- const fs = require("fs");
242
+ Example `DocumentModel` objects for the current service API version (`2022-08-31`) can be found in [the `prebuilt` samples directory][samples-prebuilt]. In the following example, we'll use the `PrebuiltReceiptModel` from the [`prebuilt-receipt.ts`][samples-prebuilt-receipt] file in that directory.
243
+
244
+ Since the main benefit of `DocumentModel`-based analysis is stronger TypeScript type constraints, the following sample is written in TypeScript using ECMAScript module syntax:
245
+
246
+ ```typescript
247
+ import { DocumentAnalysisClient, AzureKeyCredential } from "@azure/ai-form-recognizer";
248
+
249
+ // Copy the file from the above-linked sample directory so that it can be imported in this module
250
+ import { PrebuiltReceiptModel } from "./prebuilt/prebuilt-receipt";
251
+
252
+ import fs from "fs";
253
+
254
+ async function main() {
255
+ const endpoint = "<cognitive services endpoint>";
256
+ const apiKey = "<api key>";
257
+ const path = "<path to your receipt document>"; // pdf/jpeg/png/tiff formats
258
+
259
+ const readStream = fs.createReadStream(path);
260
+
261
+ const client = new DocumentAnalysisClient(endpoint, new AzureKeyCredential(apiKey));
262
+
263
+ // The PrebuiltReceiptModel `DocumentModel` instance encodes both the model ID and a stronger return type for the operation
264
+ const poller = await client.beginAnalyzeDocument(PrebuiltReceiptModel, readStream, {
265
+ onProgress: ({ status }) => {
266
+ console.log(`status: ${status}`);
267
+ },
268
+ });
269
+
270
+ const {
271
+ documents: [receiptDocument],
272
+ } = await poller.pollUntilDone();
273
+
274
+ // The fields of the document constitute the extracted receipt data.
275
+ const receipt = receiptDocument.fields;
276
+
277
+ if (receipt === undefined) {
278
+ throw new Error("Expected at least one receipt in analysis result.");
279
+ }
280
+
281
+ console.log(`Receipt data (${receiptDocument.docType})`);
282
+ console.log(" Merchant Name:", receipt.merchantName?.value);
283
+
284
+ // The items of the receipt are an example of a `DocumentArrayValue`
285
+ if (receipt.items !== undefined) {
286
+ console.log("Items:");
287
+ for (const { properties: item } of receipt.items.values) {
288
+ console.log("- Description:", item.description?.value);
289
+ console.log(" Total Price:", item.totalPrice?.value);
290
+ }
291
+ }
292
+
293
+ console.log(" Total:", receipt.total?.value);
294
+ }
295
+
296
+ main().catch((err) => {
297
+ console.error("The sample encountered an error:", err);
298
+ });
299
+ ```
300
+
301
+ Alternatively, as mentioned above, instead of using `PrebuiltReceiptModel`, which produces the stronger return type, the prebuilt receipt's model ID ("prebuilt-receipt") can be used, but the document fields will not be strongly typed in TypeScript, and the field names will generally be in "PascalCase" instead of "camelCase".
302
+
303
+ #### **Other prebuilt models**
304
+
305
+ You are not limited to receipts! There are a few prebuilt models to choose from, with more on the way. Each prebuilt model has its own set of supported fields:
306
+
307
+ - Receipts, using [`PrebuiltReceiptModel`][samples-prebuilt-receipt] (as above) or the prebuilt receipt model ID `"prebuilt-receipt"`.
308
+ - Business cards, using [`PrebuiltBusinessCardModel`][samples-prebuilt-businesscard] or its model ID `"prebuilt-businessCard"`.
309
+ - Invoices, using [`PrebuiltInvoiceModel`][samples-prebuilt-invoice] or its model ID `"prebuilt-invoice"`.
310
+ - Identity Documents (such as driver licenses and passports), using [`PrebuiltIdDocumentModel`][samples-prebuilt-iddocument] or its model ID `"prebuilt-idDocument"`.
311
+ - W2 Tax Forms (United States), using [`PrebuiltTaxUsW2Model`][samples-prebuilt-tax.us.w2] or its model ID `"prebuilt-tax.us.w2"`.
312
+ - Health Insurance Cards (United States), using [`PrebuiltHealthInsuranceCardUsModel`][samples-prebuilt-healthinsurancecard.us] or its model ID `"prebuilt-healthInsuranceCard.us"`.
313
+
314
+ Each of the above prebuilt models produces `documents` (extracted instances of the model's field schema). There are also three prebuilt models that do not have field schemas and therefore do not produce `documents`. They are:
315
+
316
+ - The prebuilt Layout model (see _[Use the "layout" prebuilt](#use-the-layout-prebuilt)_ below), which extracts information about basic layout (OCR) elements such as pages and tables.
317
+ - The prebuilt General Document model (see _[Use the "document" prebuilt](#use-the-document-prebuilt)_ below), which adds key-value pairs (directed associations between page elements, such as labeled elements) to the information produced by the layout model.
318
+ - The prebuilt Read model (see _[Use the "read" prebuilt](#use-the-read-prebuilt)_ below), which extracts only textual elements, such as page words and lines, along with information about the language of the document.
319
+
320
+ For information about the fields of all of these models, see [the service's documentation of the available prebuilt models](https://aka.ms/azsdk/formrecognizer/models).
321
+
322
+ The fields of all prebuilt models may also be accessed programmatically using the `getDocumentModel` method (by their model IDs) of `DocumentModelAdministrationClient` and inspecting the `docTypes` field in the result.
323
+
324
+ ### Use the "layout" prebuilt
325
+
326
+ <a id="beginextractlayout-removed"></a>
327
+ **Breaking Change Warning** ⚠️: In previous `4.0.0-beta` versions of the Azure Form Recognizer SDK for JavaScript, the prebuilt Layout model was provided by a custom method named `beginExtractLayout`. This method was removed and replaced with an example `DocumentModel` instance named [`PrebuiltLayoutModel`][samples-prebuilt-layout] for use with the same `beginAnalyzeDocument` method that is used to perform analysis with other prebuilt models. As previously, the model ID `"prebuilt-layout"` may still be used directly. This change will align the `prebuilt-layout` model with the other prebuilt models and enable us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
328
+
329
+ The `"prebuilt-layout"` model extracts only the basic elements of the document, such as pages, (which consist of text words/lines and selection marks), tables, and visual text styles along with their bounding regions and spans within the text content of the input documents. We provide a strongly-typed `DocumentModel` instance named [`PrebuiltLayoutModel`][samples-prebuilt-layout] that invokes this model, or as always its model ID `"prebuilt-layout"` may be used directly.
330
+
331
+ Since the main benefit of `DocumentModel`-based analysis is stronger TypeScript type constraints, the following sample is written in TypeScript using ECMAScript module syntax:
332
+
333
+ ```typescript
334
+ import { DocumentAnalysisClient, AzureKeyCredential } from "@azure/ai-form-recognizer";
335
+
336
+ // Copy the above-linked `DocumentModel` file so that it may be imported in this module.
337
+ import { PrebuiltLayoutModel } from "./prebuilt/prebuilt-layout";
338
+
339
+ import fs from "fs";
245
340
 
246
341
  async function main() {
247
342
  const endpoint = "<cognitive services endpoint>";
@@ -251,14 +346,14 @@ async function main() {
251
346
  const readStream = fs.createReadStream(path);
252
347
 
253
348
  const client = new DocumentAnalysisClient(endpoint, new AzureKeyCredential(apiKey));
254
- const poller = await client.beginExtractLayout(readStream);
349
+ const poller = await client.beginAnalyzeDocument(PrebuiltLayoutModel, readStream);
255
350
  const { pages, tables } = await poller.pollUntilDone();
256
351
 
257
- for (const page of pages) {
352
+ for (const page of pages || []) {
258
353
  console.log(`- Page ${page.pageNumber}: (${page.width}x${page.height} ${page.unit})`);
259
354
  }
260
355
 
261
- for (const table of tables) {
356
+ for (const table of tables || []) {
262
357
  console.log(`- Table (${table.columnCount}x${table.rowCount})`);
263
358
  for (const cell of table.cells) {
264
359
  console.log(` cell [${cell.rowIndex},${cell.columnIndex}] "${cell.content}"`);
@@ -271,19 +366,22 @@ main().catch((err) => {
271
366
  });
272
367
  ```
273
368
 
274
- _Note_: you may also use the `beginAnalyzeDocument` method to extract layout information using the prebuilt layout model by providing the model ID `"prebuilt-layout"`. This method provides a weaker TypeScript type for the layout analysis result, but will produce the same information. The `beginExtractLayout` method is available for your convenience.
369
+ ### Use the "document" prebuilt
275
370
 
276
- ### Extract General Document
371
+ <a id="beginextractdocument-removed"></a>
372
+ **Breaking Change Warning** ⚠️: In previous `4.0.0-beta` versions of the Azure Form Recognizer SDK for JavaScript, the prebuilt document model was provided by a custom method named `beginExtractGeneralDocument`. This method was removed and replaced with an example `DocumentModel` instance named [`PrebuiltDocumentModel`][samples-prebuilt-document] for use with the same `beginAnalyzeDocument` method that is used to perform analysis with other prebuilt models. As previously, the model ID `"prebuilt-document"` may still be used directly. This change will align the `prebuilt-document` model with the other prebuilt models and enable us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
277
373
 
278
- <a id="beginextractgeneraldocument-deprecation"></a>
279
- **Deprecation Warning** ⚠️: The built in `beginExtractGeneralDocument` method is deprecated as of version `4.0.0-beta.4`. Prior to a stable release of version 4.0.0, we will remove it and provide a solution that uses the `beginAnalyzeDocument` method instead. This will align the `"prebuilt-document"` model with the other prebuilt models, enabling us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
374
+ The `"prebuilt-document"` model extracts information about key-value pairs (directed associations between page elements, such as labeled fields) in addition to the properties produced by the layout extraction method. This prebuilt (general) document model provides similar functionality to the custom models trained without label information in previous iterations of the Form Recognizer service, but it is now provided as a prebuilt model that works with a wide variety of documents. We provide a strongly-typed `DocumentModel` instance named [`PrebuiltDocumentModel`][samples-prebuilt-document] that invokes this model, or as always its model ID `"prebuilt-document"` may be used directly.
280
375
 
281
- The `beginExtractGeneralDocument` method extracts information about key-value pairs and entities in addition to the properties produced by the layout extraction method. This prebuilt (general) document model provides similar functionality to the custom models trained without label information in previous iterations of the Form Recognizer service, but it is now provided as a prebuilt model that works with a wide variety of documents.
376
+ Since the main benefit of `DocumentModel`-based analysis is stronger TypeScript type constraints, the following sample is written in TypeScript using ECMAScript module syntax:
282
377
 
283
- ```javascript
284
- const { DocumentAnalysisClient, AzureKeyCredential } = require("@azure/ai-form-recognizer");
378
+ ```typescript
379
+ import { DocumentAnalysisClient, AzureKeyCredential } from "@azure/ai-form-recognizer";
285
380
 
286
- const fs = require("fs");
381
+ // Copy the above-linked `DocumentModel` file so that it may be imported in this module.
382
+ import { PrebuiltDocumentModel } from "./prebuilt/prebuilt-document";
383
+
384
+ import fs from "fs";
287
385
 
288
386
  async function main() {
289
387
  const endpoint = "<cognitive services endpoint>";
@@ -293,13 +391,13 @@ async function main() {
293
391
  const readStream = fs.createReadStream(path);
294
392
 
295
393
  const client = new DocumentAnalysisClient(endpoint, new AzureKeyCredential(apiKey));
296
- const poller = await client.beginExtractGeneralDocument(readStream);
394
+ const poller = await client.beginAnalyzeDocument(PrebuiltDocumentModel, readStream);
297
395
 
298
396
  // `pages`, `tables` and `styles` are also available as in the "layout" example above, but for the sake of this
299
397
  // example we won't show them here.
300
- const { keyValuePairs, entities } = await poller.pollUntilDone();
398
+ const { keyValuePairs } = await poller.pollUntilDone();
301
399
 
302
- if (keyValuePairs.length <= 0) {
400
+ if (!keyValuePairs || keyValuePairs.length <= 0) {
303
401
  console.log("No key-value pairs were extracted from the document.");
304
402
  } else {
305
403
  console.log("Key-Value Pairs:");
@@ -308,19 +406,6 @@ async function main() {
308
406
  console.log(" Value:", `"${value?.content ?? "<undefined>"}" (${confidence})`);
309
407
  }
310
408
  }
311
-
312
- if (entities.length <= 0) {
313
- console.log("No entities were extracted from the document.");
314
- } else {
315
- console.log("Entities:");
316
- for (const entity of entities) {
317
- console.log(
318
- `- "${entity.content}" ${entity.category} - ${entity.subCategory ?? "<none>"} (${
319
- entity.confidence
320
- })`
321
- );
322
- }
323
- }
324
409
  }
325
410
 
326
411
  main().catch((err) => {
@@ -328,16 +413,25 @@ main().catch((err) => {
328
413
  });
329
414
  ```
330
415
 
331
- _Note_: you may also use the `beginAnalyzeDocument` method to extract general document information using the prebuilt document model by providing the model ID `"prebuilt-document"`. This method provides a weaker TypeScript type for the layout analysis result, but will produce the same information. The `beginExtractGeneralDocument` method is available for your convenience.
416
+ ### Use the "read" prebuilt
332
417
 
333
- ### Read Document
418
+ <a id="beginreaddocument-removed"></a>
419
+ **Breaking Change Warning** ⚠️: In previous `4.0.0-beta` versions of the Azure Form Recognizer SDK for JavaScript, the prebuilt "read" model was provided by a custom method named `beginReadDocument`. This method was removed and replaced with an example `DocumentModel` instance named [`PrebuiltReadModel`][samples-prebuilt-read] for use with the same `beginAnalyzeDocument` method that is used to perform analysis with other prebuilt models. As previously, the model ID `"prebuilt-read"` may still be used directly. This change will align the `prebuilt-read` model with the other prebuilt models and enable us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
334
420
 
335
- <a id="beginreaddocument-deprecation"></a>
336
- **Deprecation Warning** ⚠️: The built in `beginReadDocument` method is deprecated as of version `4.0.0-beta.4`. Prior to a stable release of version 4.0.0, we will remove it and provide a solution that uses the `beginAnalyzeDocument` method instead. This will align the `"prebuilt-document"` model with the other prebuilt models, enabling us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
421
+ The `"prebuilt-read"` model extracts textual information in a document such as words and paragraphs and analyzes the language and writing style (e.g. handwritten vs. typeset) of that text. We provide a strongly-typed `DocumentModel` instance named [`PrebuiltReadModel`][samples-prebuilt-read] that invokes this model, or as always its model ID `"prebuilt-read"` may be used directly.
337
422
 
338
- ```javascript
339
- const { DocumentAnalysisClient, AzureKeyCredential } = require("@azure/ai-form-recognizer");
340
- const fs = require("fs");
423
+ Since the main benefit of `DocumentModel`-based analysis is stronger TypeScript type constraints, the following sample is written in TypeScript using ECMAScript module syntax:
424
+
425
+ ```typescript
426
+ import { DocumentAnalysisClient, AzureKeyCredential } from "@azure/ai-form-recognizer";
427
+
428
+ // Copy the above-linked `DocumentModel` file so that it may be imported in this module.
429
+ import { PrebuiltReadModel } from "./prebuilt/prebuilt-read";
430
+
431
+ // See the samples directory for a definition of this helper function.
432
+ import { getTextOfSpans } from "./utils";
433
+
434
+ import fs from "fs";
341
435
 
342
436
  async function main() {
343
437
  const endpoint = "<cognitive services endpoint>";
@@ -347,22 +441,24 @@ async function main() {
347
441
  const readStream = fs.createReadStream(path);
348
442
 
349
443
  const client = new DocumentAnalysisClient(endpoint, new AzureKeyCredential(apiKey));
350
- const poller = await client.beginReadDocument(readStream);
444
+ const poller = await client.beginAnalyzeDocument(PrebuiltReadModel, readStream);
351
445
 
352
446
  // The "prebuilt-read" model (`beginReadDocument` method) only extracts information about the textual content of the
353
447
  // document, such as page text elements, text styles, and information about the language of the text.
354
- const { content, pages, languages, styles } = await poller.pollUntilDone();
448
+ const { content, pages, languages } = await poller.pollUntilDone();
355
449
 
356
- if (pages.length <= 0) {
450
+ if (!pages || pages.length <= 0) {
357
451
  console.log("No pages were extracted from the document.");
358
452
  } else {
359
453
  console.log("Pages:");
360
454
  for (const page of pages) {
361
455
  console.log("- Page", page.pageNumber, `(unit: ${page.unit})`);
362
456
  console.log(` ${page.width}x${page.height}, angle: ${page.angle}`);
363
- console.log(` ${page.lines.length} lines, ${page.words.length} words`);
457
+ console.log(
458
+ ` ${page.lines && page.lines.length} lines, ${page.words && page.words.length} words`
459
+ );
364
460
 
365
- if (page.lines.length > 0) {
461
+ if (page.lines && page.lines.length > 0) {
366
462
  console.log(" Lines:");
367
463
 
368
464
  for (const line of page.lines) {
@@ -372,14 +468,15 @@ async function main() {
372
468
  }
373
469
  }
374
470
 
375
- if (languages.length <= 0) {
471
+ if (!languages || languages.length <= 0) {
376
472
  console.log("No language spans were extracted from the document.");
377
473
  } else {
378
474
  console.log("Languages:");
379
475
  for (const languageEntry of languages) {
380
476
  console.log(
381
- `- Found language: ${languageEntry.languageCode} (confidence: ${languageEntry.confidence})`
477
+ `- Found language: ${languageEntry.locale} (confidence: ${languageEntry.confidence})`
382
478
  );
479
+
383
480
  for (const text of getTextOfSpans(content, languageEntry.spans)) {
384
481
  const escapedText = text.replace(/\r?\n/g, "\\n").replace(/"/g, '\\"');
385
482
  console.log(` - "${escapedText}"`);
@@ -394,94 +491,11 @@ main().catch((error) => {
394
491
  });
395
492
  ```
396
493
 
397
- _Note_: you may also use the `beginAnalyzeDocument` method to read document information using the "read" model by providing the model ID `"prebuilt-read"`. This method provides a weaker TypeScript type for the read result, but will produce the same information. The `beginReadDocument` method is available for your convenience.
398
-
399
- ### Using Prebuilt Models
400
-
401
- The `beginAnalyzeDocument` method also supports extracting fields from certain types of common documents such as receipts, invoices, business cards, and identity documents using prebuilt models provided by the Form Recognizer service. The prebuilt models may be provided either as model ID strings (the same as custom document models) or using a `DocumentModel` object. When using a `DocumentModel`, the Form Recognizer SDK for JavaScript provides a much stronger TypeScript type for the resulting extracted documents based on the model's schema, and it will be converted to use JavaScript naming conventions.
402
-
403
- <a id="prebuiltmodels-deprecation"></a>
404
- **Deprecation Warning** ⚠️: The built in `PrebuiltModels` are deprecated as of version `4.0.0-beta.4`. Prior to a stable release of version 4.0.0, we will replace `PrebuiltModels` with an out-of-tree solution that provides the same strongly-typed functionality. This will enable us to continue to provide timely updates and ensure stability as the number of supported prebuilt models increases and as their capabilities are enhanced.
405
-
406
- For example, the following code shows how to use `PrebuiltModels.Receipt` to extract a strongly-typed receipt object from an input.
407
-
408
- ```javascript
409
- const {
410
- DocumentAnalysisClient,
411
- PrebuiltModels,
412
- AzureKeyCredential,
413
- } = require("@azure/ai-form-recognizer");
414
-
415
- const fs = require("fs");
416
-
417
- async function main() {
418
- const endpoint = "<cognitive services endpoint>";
419
- const apiKey = "<api key>";
420
- const path = "<path to your receipt document>"; // pdf/jpeg/png/tiff formats
421
-
422
- const readStream = fs.createReadStream(path);
423
-
424
- const client = new DocumentAnalysisClient(endpoint, new AzureKeyCredential(apiKey));
425
-
426
- // The PrebuiltModels.Receipt `DocumentModel` encodes both the model ID and a stronger return type for the operation
427
- const poller = await client.beginAnalyzeDocument(PrebuiltModels.Receipt, readStream, {
428
- onProgress: ({ status }) => {
429
- console.log(`status: ${status}`);
430
- },
431
- });
432
-
433
- const {
434
- documents: [receiptDocument],
435
- } = await poller.pollUntilDone();
436
-
437
- // The fields of the document constitute the extracted receipt data.
438
- const receipt = receiptDocument.fields;
439
-
440
- if (receipt === undefined) {
441
- throw new Error("Expected at least one receipt in analysis result.");
442
- }
443
-
444
- console.log(`Receipt data (${receiptDocument.docType})`);
445
- console.log(" Merchant Name:", receipt.merchantName?.value);
446
-
447
- // The items of the receipt are an example of a `DocumentArrayValue`
448
- if (receipt.items !== undefined) {
449
- console.log("Items:");
450
- for (const { properties: item } of receipt.items.values) {
451
- console.log("- Description:", item.description?.value);
452
- console.log(" Total Price:", item.totalPrice?.value);
453
- }
454
- }
455
-
456
- console.log(" Total:", receipt.total?.value);
457
- }
458
-
459
- main().catch((err) => {
460
- console.error("The sample encountered an error:", err);
461
- });
462
- ```
463
-
464
- Alternatively, as mentioned above, instead of using `PrebuiltDocuments.Receipt`, which produces the stronger return type, the prebuilt receipt's model ID ("prebuilt-receipt") can be used, but the document fields will not be strongly typed in TypeScript, and the field names will be in "PascalCase" instead of "camelCase".
465
-
466
- #### **Other Prebuilt Models**
467
-
468
- You are not limited to receipts! There are a few prebuilt models to choose from, with more on the way. Each prebuilt model has its own set of supported fields:
494
+ ### Build a model
469
495
 
470
- - Receipts, using `PrebuiltModels.Receipt` or the prebuilt receipt model ID `"prebuilt-receipt"`.
471
- - Business cards, using `PrebuiltModels.BusinessCard` or its model ID `"prebuilt-businessCard"`.
472
- - Invoices, using `PrebuiltModels.Invoice` or its model ID `"prebuilt-invoice"`.
473
- - Identity Documents (such as driver licenses and passports), using `PrebuiltModels.IdentityDocument` or its model ID `"prebuilt-idDocument"`.
474
- - W2 Tax Forms (United States), using `PrebuiltModels.TaxUsW2` or its model ID `"prebuilt-tax.us.w2"`.
475
- - Health Insurance Cards (United States), using `PrebuiltModels.HealthInsuranceCardUs` or its model ID `"prebuilt-healthInsuranceCard.us"`.
476
- - Vaccination Cards (currently supports US COVID-19 vaccination cards), using `PrebuiltModels.VaccinationCard` or its model ID `"prebuilt-vaccinationCard"`.
496
+ The SDK also supports creating models using the `DocumentModelAdministrationClient` class. Building a model from labeled training data creates a new model that is trained on your own documents, and the resulting model will be able to recognize values from the structures of those documents. The model building operation accepts a SAS-encoded URL to an Azure Storage Blob container that holds the training documents. The Form Recognizer service's infrastructure will read the files in the container and create a model based on their contents. For more details on how to create and structure a training data container, see the [Form Recognizer service's documentation for building a model][fr-build-model].
477
497
 
478
- For information about the fields of these models, see [the service's documentation of the available prebuilt models](https://aka.ms/azsdk/formrecognizer/models).
479
-
480
- The fields of all prebuilt document models may also be accessed programmatically using the `getModel` method (by their model IDs) of `DocumentModelAdministrationClient` and inspecting the `docTypes` field in the result.
481
-
482
- ### Build a Model
483
-
484
- The SDK also supports creating models, using `DocumentModelAdministrationClient`. Building a model from labeled training data creates a new model that is trained on your own documents, and the resulting model will be able to recognize values from the structures of those documents. The model building operation accepts a SAS-encoded URL to an Azure Storage Blob container that holds the training documents. The Form Recognizer service's infrastructure will read the files in the container and create a model based on their contents. For more details on how to create and structure a training data container, see the [Form Recognizer service's documentation for building a model][fr-build-model]. The Form Recognizer service team has created a tool to assist in the labeling and creation of models, please see [the documentation of the labeling tool][fr-labeling-tool] for more information.
498
+ While we provide these methods for programmatic model creation, the Form Recognizer service team has created an interactive web application, [Form Recognizer Studio (Preview)][fr-studio], that enables creating and managing models on the web.
485
499
 
486
500
  For example, the following program builds a custom document model using a SAS-encoded URL to a pre-existing Azure Storage container:
487
501
 
@@ -496,17 +510,14 @@ async function main() {
496
510
  const apiKey = "<api key>";
497
511
  const containerSasUrl = "<SAS url to the blob container storing training documents>";
498
512
 
499
- const trainingClient = new DocumentModelAdministrationClient(
500
- endpoint,
501
- new AzureKeyCredential(apiKey)
502
- );
513
+ const client = new DocumentModelAdministrationClient(endpoint, new AzureKeyCredential(apiKey));
503
514
 
504
515
  // You must provide the model ID. It can be any text that does not start with "prebuilt-".
505
516
  // For example, you could provide a randomly generated GUID using the "uuid" package.
506
517
  // The second parameter is the SAS-encoded URL to an Azure Storage container with the training documents.
507
518
  // The third parameter is the build mode: one of "template" (the only mode prior to 4.0.0-beta.3) or "neural".
508
519
  // See https://aka.ms/azsdk/formrecognizer/buildmode for more information about build modes.
509
- const poller = await trainingClient.beginBuildModel("<model ID>", containerSasUrl, "template", {
520
+ const poller = await client.beginBuildDocumentModel("<model ID>", containerSasUrl, "template", {
510
521
  // The model description is optional and can be any text.
511
522
  description: "This is my new model!",
512
523
  onProgress: ({ status }) => {
@@ -517,7 +528,7 @@ async function main() {
517
528
 
518
529
  console.log("Model ID:", model.modelId);
519
530
  console.log("Description:", model.description);
520
- console.log("Created:", model.createdDateTime);
531
+ console.log("Created:", model.createdOn);
521
532
 
522
533
  // A model may contain several document types, which describe the possible object structures of fields extracted using
523
534
  // this model
@@ -544,9 +555,9 @@ main().catch((err) => {
544
555
  });
545
556
  ```
546
557
 
547
- ### Manage Models
558
+ ### Manage models
548
559
 
549
- `DocumentModelAdministrationClient` also provides several methods for managing models. The following example shows how to iterate through the models in a Form Recognizer resource (this will include both custom models in the resource as well as prebuilt models that are common to all resources), get a model by ID, and delete a model.
560
+ `DocumentModelAdministrationClient` also provides several methods for accessing and listing models. The following example shows how to iterate through the models in a Form Recognizer resource (this will include both custom models in the resource as well as prebuilt models that are common to all resources), get a model by ID, and delete a model.
550
561
 
551
562
  ```javascript
552
563
  const {
@@ -559,10 +570,10 @@ async function main() {
559
570
  const apiKey = "<api key>";
560
571
  const client = new DocumentModelAdministrationClient(endpoint, new AzureKeyCredential(apiKey));
561
572
 
562
- // Produces an async iterable that supports paging (`PagedAsyncIterableIterator`). The `listModels` method will only
573
+ // Produces an async iterable that supports paging (`PagedAsyncIterableIterator`). The `listDocumentModels` method will only
563
574
  // iterate over model summaries, which do not include detailed schema information. Schema information is only returned
564
- // from `getModel` as part of the full model information.
565
- const models = client.listModels();
575
+ // from `getDocumentModel` as part of the full model information.
576
+ const models = client.listDocumentModels();
566
577
  let i = 1;
567
578
  for await (const summary of models) {
568
579
  console.log(`Model ${i++}:`, summary);
@@ -570,23 +581,23 @@ async function main() {
570
581
 
571
582
  // The iterable is paged, and the application can control the flow of paging if needed
572
583
  i = 1;
573
- for await (const page of client.listModels().byPage()) {
584
+ for await (const page of client.listDocumentModels().byPage()) {
574
585
  for (const summary of page) {
575
586
  console.log(`Model ${i++}`, summary);
576
587
  }
577
588
  }
578
589
 
579
590
  // We can also get a full ModelInfo by ID. Here we only show the basic information. See the documentation and the
580
- // `getModel` sample program for information about the `docTypes` field, which contains the model's document type
591
+ // `getDocumentModel` sample program for information about the `docTypes` field, which contains the model's document type
581
592
  // schemas.
582
- const model = await client.getModel("<model ID>");
593
+ const model = await client.getDocumentModel("<model ID>");
583
594
  console.log("ID", model.modelId);
584
- console.log("Created:", model.createdDateTime);
595
+ console.log("Created:", model.createdOn);
585
596
  console.log("Description: ", model.description ?? "<none>");
586
597
 
587
598
  // A model can also be deleted by its model ID. Once it is deleted, it CANNOT be recovered.
588
599
  const modelIdToDelete = "<model ID that should be deleted forever>";
589
- await client.deleteModel(modelIdToDelete);
600
+ await client.deleteDocumentModel(modelIdToDelete);
590
601
  }
591
602
 
592
603
  main().catch((err) => {
@@ -596,7 +607,7 @@ main().catch((err) => {
596
607
 
597
608
  ## Troubleshooting
598
609
 
599
- ### Form Recognizer Errors
610
+ ### Form Recognizer errors
600
611
 
601
612
  For information about the error messages and codes produced by the Form Recognizer service, please refer to [the service's error documentation][fr-errors].
602
613
 
@@ -635,6 +646,17 @@ If you'd like to contribute to this library, please read the [contributing guide
635
646
  [azure_portal_create_fr_resource]: https://ms.portal.azure.com/#create/Microsoft.CognitiveServicesFormRecognizer
636
647
  [azure_cli_create_fr_resource]: https://docs.microsoft.com/azure/cognitive-services/cognitive-services-apis-create-account-cli?tabs=windows
637
648
  [fr-labeling-tool]: https://aka.ms/azsdk/formrecognizer/labelingtool
649
+ [fr-studio]: https://formrecognizer.appliedai.azure.com/studio
638
650
  [fr-build-training-set]: https://aka.ms/azsdk/formrecognizer/buildtrainingset
639
651
  [fr-errors]: https://aka.ms/azsdk/formrecognizer/errors
640
652
  [fr-models]: https://aka.ms/azsdk/formrecognizer/models
653
+ [samples-prebuilt]: https://github.com/azure/azure-sdk-for-js/tree/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/
654
+ [samples-prebuilt-businesscard]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-businessCard.ts
655
+ [samples-prebuilt-document]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-document.ts
656
+ [samples-prebuilt-healthinsurancecard]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-healthInsuranceCard.ts
657
+ [samples-prebuilt-iddocument]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-idDocument.ts
658
+ [samples-prebuilt-invoice]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-invoice.ts
659
+ [samples-prebuilt-layout]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-layout.ts
660
+ [samples-prebuilt-read]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-read.ts
661
+ [samples-prebuilt-receipt]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-receipt.ts
662
+ [samples-prebuilt-tax.us.w2]: https://github.com/azure/azure-sdk-for-js/blob/main/sdk/formrecognizer/ai-form-recognizer/samples-dev/prebuilt/prebuilt-tax.us.w2.ts