oci-aivision 2.21.0 → 2.25.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +2 -2
- package/index.js +2 -2
- package/lib/aiservicevision-waiter.d.ts +2 -2
- package/lib/aiservicevision-waiter.js +2 -2
- package/lib/client.d.ts +46 -45
- package/lib/client.js +51 -46
- package/lib/client.js.map +1 -1
- package/lib/model/action-type.d.ts +2 -2
- package/lib/model/action-type.js +2 -2
- package/lib/model/analyze-document-details.d.ts +6 -6
- package/lib/model/analyze-document-details.js +2 -2
- package/lib/model/analyze-document-result.d.ts +11 -11
- package/lib/model/analyze-document-result.js +2 -2
- package/lib/model/analyze-image-details.d.ts +5 -5
- package/lib/model/analyze-image-details.js +2 -2
- package/lib/model/analyze-image-result.d.ts +10 -10
- package/lib/model/analyze-image-result.js +2 -2
- package/lib/model/bounding-polygon.d.ts +4 -4
- package/lib/model/bounding-polygon.js +2 -2
- package/lib/model/cell.d.ts +7 -7
- package/lib/model/cell.js +2 -2
- package/lib/model/change-model-compartment-details.d.ts +3 -3
- package/lib/model/change-model-compartment-details.js +2 -2
- package/lib/model/change-project-compartment-details.d.ts +2 -2
- package/lib/model/change-project-compartment-details.js +2 -2
- package/lib/model/create-document-job-details.d.ts +8 -8
- package/lib/model/create-document-job-details.js +2 -2
- package/lib/model/create-image-job-details.d.ts +7 -7
- package/lib/model/create-image-job-details.js +2 -2
- package/lib/model/create-model-details.d.ts +14 -14
- package/lib/model/create-model-details.js +2 -2
- package/lib/model/create-project-details.d.ts +9 -9
- package/lib/model/create-project-details.js +2 -2
- package/lib/model/data-science-labeling-dataset.d.ts +3 -3
- package/lib/model/data-science-labeling-dataset.js +2 -2
- package/lib/model/dataset.d.ts +2 -2
- package/lib/model/dataset.js +2 -2
- package/lib/model/detected-document-type.d.ts +5 -5
- package/lib/model/detected-document-type.js +2 -2
- package/lib/model/detected-language.d.ts +5 -5
- package/lib/model/detected-language.js +2 -2
- package/lib/model/dimensions.d.ts +6 -6
- package/lib/model/dimensions.js +2 -2
- package/lib/model/document-classification-feature.d.ts +4 -4
- package/lib/model/document-classification-feature.js +2 -2
- package/lib/model/document-details.d.ts +3 -3
- package/lib/model/document-details.js +2 -2
- package/lib/model/document-feature.d.ts +3 -3
- package/lib/model/document-feature.js +2 -2
- package/lib/model/document-field.d.ts +3 -3
- package/lib/model/document-field.js +2 -2
- package/lib/model/document-job.d.ts +14 -14
- package/lib/model/document-job.js +2 -2
- package/lib/model/document-key-value-detection-feature.d.ts +2 -2
- package/lib/model/document-key-value-detection-feature.js +2 -2
- package/lib/model/document-language-classification-feature.d.ts +2 -2
- package/lib/model/document-language-classification-feature.js +2 -2
- package/lib/model/document-language.d.ts +3 -3
- package/lib/model/document-language.js +3 -3
- package/lib/model/document-metadata.d.ts +5 -5
- package/lib/model/document-metadata.js +2 -2
- package/lib/model/document-table-detection-feature.d.ts +2 -2
- package/lib/model/document-table-detection-feature.js +2 -2
- package/lib/model/document-text-detection-feature.d.ts +3 -3
- package/lib/model/document-text-detection-feature.js +2 -2
- package/lib/model/document-type.d.ts +3 -3
- package/lib/model/document-type.js +3 -3
- package/lib/model/field-label.d.ts +5 -5
- package/lib/model/field-label.js +2 -2
- package/lib/model/field-name.d.ts +6 -6
- package/lib/model/field-name.js +2 -2
- package/lib/model/field-value.d.ts +6 -6
- package/lib/model/field-value.js +2 -2
- package/lib/model/image-classification-feature.d.ts +4 -4
- package/lib/model/image-classification-feature.js +2 -2
- package/lib/model/image-details.d.ts +3 -3
- package/lib/model/image-details.js +2 -2
- package/lib/model/image-feature.d.ts +3 -3
- package/lib/model/image-feature.js +2 -2
- package/lib/model/image-job.d.ts +12 -12
- package/lib/model/image-job.js +2 -2
- package/lib/model/image-object-detection-feature.d.ts +4 -4
- package/lib/model/image-object-detection-feature.js +2 -2
- package/lib/model/image-object.d.ts +5 -5
- package/lib/model/image-object.js +2 -2
- package/lib/model/image-text-detection-feature.d.ts +4 -4
- package/lib/model/image-text-detection-feature.js +2 -2
- package/lib/model/image-text.d.ts +5 -5
- package/lib/model/image-text.js +2 -2
- package/lib/model/index.d.ts +2 -2
- package/lib/model/index.js +2 -2
- package/lib/model/inline-document-details.d.ts +4 -4
- package/lib/model/inline-document-details.js +2 -2
- package/lib/model/inline-image-details.d.ts +4 -4
- package/lib/model/inline-image-details.js +2 -2
- package/lib/model/input-location.d.ts +3 -3
- package/lib/model/input-location.js +2 -2
- package/lib/model/label.d.ts +5 -5
- package/lib/model/label.js +2 -2
- package/lib/model/line.d.ts +6 -6
- package/lib/model/line.js +2 -2
- package/lib/model/model-collection.d.ts +4 -4
- package/lib/model/model-collection.js +2 -2
- package/lib/model/model-summary.d.ts +14 -14
- package/lib/model/model-summary.js +2 -2
- package/lib/model/model.d.ts +22 -22
- package/lib/model/model.js +2 -2
- package/lib/model/normalized-vertex.d.ts +4 -4
- package/lib/model/normalized-vertex.js +2 -2
- package/lib/model/object-list-inline-input-location.d.ts +3 -3
- package/lib/model/object-list-inline-input-location.js +2 -2
- package/lib/model/object-location.d.ts +5 -5
- package/lib/model/object-location.js +2 -2
- package/lib/model/object-storage-dataset.d.ts +3 -3
- package/lib/model/object-storage-dataset.js +2 -2
- package/lib/model/object-storage-document-details.d.ts +5 -5
- package/lib/model/object-storage-document-details.js +2 -2
- package/lib/model/object-storage-image-details.d.ts +6 -6
- package/lib/model/object-storage-image-details.js +2 -2
- package/lib/model/ontology-class.d.ts +5 -5
- package/lib/model/ontology-class.js +2 -2
- package/lib/model/operation-status.d.ts +3 -3
- package/lib/model/operation-status.js +3 -3
- package/lib/model/operation-type.d.ts +2 -2
- package/lib/model/operation-type.js +2 -2
- package/lib/model/output-location.d.ts +6 -6
- package/lib/model/output-location.js +2 -2
- package/lib/model/page.d.ts +7 -7
- package/lib/model/page.js +2 -2
- package/lib/model/processing-error.d.ts +5 -5
- package/lib/model/processing-error.js +2 -2
- package/lib/model/project-collection.d.ts +3 -3
- package/lib/model/project-collection.js +2 -2
- package/lib/model/project-summary.d.ts +11 -11
- package/lib/model/project-summary.js +2 -2
- package/lib/model/project.d.ts +12 -12
- package/lib/model/project.js +2 -2
- package/lib/model/sort-order.d.ts +2 -2
- package/lib/model/sort-order.js +2 -2
- package/lib/model/table-row.d.ts +3 -3
- package/lib/model/table-row.js +2 -2
- package/lib/model/table.d.ts +9 -9
- package/lib/model/table.js +2 -2
- package/lib/model/update-model-details.d.ts +8 -8
- package/lib/model/update-model-details.js +2 -2
- package/lib/model/update-project-details.d.ts +8 -8
- package/lib/model/update-project-details.js +2 -2
- package/lib/model/value-array.d.ts +3 -3
- package/lib/model/value-array.js +2 -2
- package/lib/model/value-date.d.ts +4 -4
- package/lib/model/value-date.js +2 -2
- package/lib/model/value-integer.d.ts +4 -4
- package/lib/model/value-integer.js +2 -2
- package/lib/model/value-number.d.ts +4 -4
- package/lib/model/value-number.js +2 -2
- package/lib/model/value-phone-number.d.ts +4 -4
- package/lib/model/value-phone-number.js +2 -2
- package/lib/model/value-string.d.ts +4 -4
- package/lib/model/value-string.js +2 -2
- package/lib/model/value-time.d.ts +4 -4
- package/lib/model/value-time.js +2 -2
- package/lib/model/word.d.ts +4 -4
- package/lib/model/word.js +2 -2
- package/lib/model/work-request-error-collection.d.ts +4 -4
- package/lib/model/work-request-error-collection.js +2 -2
- package/lib/model/work-request-error.d.ts +3 -3
- package/lib/model/work-request-error.js +2 -2
- package/lib/model/work-request-log-entry-collection.d.ts +4 -4
- package/lib/model/work-request-log-entry-collection.js +2 -2
- package/lib/model/work-request-log-entry.d.ts +3 -3
- package/lib/model/work-request-log-entry.js +2 -2
- package/lib/model/work-request-resource.d.ts +6 -6
- package/lib/model/work-request-resource.js +2 -2
- package/lib/model/work-request-summary-collection.d.ts +4 -4
- package/lib/model/work-request-summary-collection.js +2 -2
- package/lib/model/work-request-summary.d.ts +9 -9
- package/lib/model/work-request-summary.js +2 -2
- package/lib/model/work-request.d.ts +7 -7
- package/lib/model/work-request.js +2 -2
- package/lib/request/analyze-document-request.d.ts +2 -2
- package/lib/request/analyze-image-request.d.ts +1 -1
- package/lib/request/cancel-document-job-request.d.ts +1 -1
- package/lib/request/cancel-image-job-request.d.ts +1 -1
- package/lib/request/cancel-work-request-request.d.ts +1 -1
- package/lib/request/change-model-compartment-request.d.ts +3 -3
- package/lib/request/change-project-compartment-request.d.ts +2 -2
- package/lib/request/create-document-job-request.d.ts +3 -3
- package/lib/request/create-image-job-request.d.ts +3 -3
- package/lib/request/create-model-request.d.ts +3 -3
- package/lib/request/create-project-request.d.ts +3 -3
- package/lib/request/delete-model-request.d.ts +2 -2
- package/lib/request/delete-project-request.d.ts +2 -2
- package/lib/request/get-document-job-request.d.ts +1 -1
- package/lib/request/get-image-job-request.d.ts +1 -1
- package/lib/request/get-model-request.d.ts +2 -2
- package/lib/request/get-project-request.d.ts +2 -2
- package/lib/request/get-work-request-request.d.ts +1 -1
- package/lib/request/index.d.ts +2 -2
- package/lib/request/index.js +2 -2
- package/lib/request/list-models-request.d.ts +4 -4
- package/lib/request/list-projects-request.d.ts +4 -4
- package/lib/request/list-work-request-errors-request.d.ts +2 -2
- package/lib/request/list-work-request-logs-request.d.ts +2 -2
- package/lib/request/list-work-requests-request.d.ts +3 -3
- package/lib/request/update-model-request.d.ts +3 -3
- package/lib/request/update-project-request.d.ts +3 -3
- package/lib/response/analyze-document-response.d.ts +1 -1
- package/lib/response/analyze-image-response.d.ts +1 -1
- package/lib/response/cancel-document-job-response.d.ts +1 -1
- package/lib/response/cancel-image-job-response.d.ts +1 -1
- package/lib/response/cancel-work-request-response.d.ts +1 -1
- package/lib/response/change-model-compartment-response.d.ts +1 -1
- package/lib/response/change-project-compartment-response.d.ts +1 -1
- package/lib/response/create-document-job-response.d.ts +1 -1
- package/lib/response/create-image-job-response.d.ts +1 -1
- package/lib/response/create-model-response.d.ts +2 -2
- package/lib/response/create-project-response.d.ts +2 -2
- package/lib/response/delete-model-response.d.ts +2 -2
- package/lib/response/delete-project-response.d.ts +2 -2
- package/lib/response/get-document-job-response.d.ts +1 -1
- package/lib/response/get-image-job-response.d.ts +1 -1
- package/lib/response/get-model-response.d.ts +1 -1
- package/lib/response/get-project-response.d.ts +1 -1
- package/lib/response/get-work-request-response.d.ts +1 -1
- package/lib/response/index.d.ts +2 -2
- package/lib/response/index.js +2 -2
- package/lib/response/list-models-response.d.ts +1 -1
- package/lib/response/list-projects-response.d.ts +1 -1
- package/lib/response/list-work-request-errors-response.d.ts +1 -1
- package/lib/response/list-work-request-logs-response.d.ts +1 -1
- package/lib/response/list-work-requests-response.d.ts +1 -1
- package/lib/response/update-model-response.d.ts +2 -2
- package/lib/response/update-project-response.d.ts +2 -2
- package/package.json +3 -3
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,12 +12,12 @@
 */
 import * as model from "../model";
 /**
- *
+ * The document analysis results.
 */
 export interface AnalyzeDocumentResult {
 "documentMetadata": model.DocumentMetadata;
 /**
- *
+ * The array of a Page.
 */
 "pages": Array<model.Page>;
 /**

@@ -29,31 +29,31 @@ export interface AnalyzeDocumentResult {
 */
 "detectedLanguages"?: Array<model.DetectedLanguage>;
 /**
- *
+ * The document classification model version.
 */
 "documentClassificationModelVersion"?: string;
 /**
- *
+ * The document language classification model version.
 */
 "languageClassificationModelVersion"?: string;
 /**
- *
+ * The document text detection model version.
 */
 "textDetectionModelVersion"?: string;
 /**
- *
+ * The document keyValue detection model version.
 */
 "keyValueDetectionModelVersion"?: string;
 /**
- *
+ * The document table detection model version.
 */
 "tableDetectionModelVersion"?: string;
 /**
- *
+ * The errors encountered during document analysis.
 */
 "errors"?: Array<model.ProcessingError>;
 /**
- *
+ * The searchable PDF file that was generated.
 */
 "searchablePdf"?: string;
 }
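The hunks above fill in the documentation for the `AnalyzeDocumentResult` fields. A minimal sketch of consuming those fields follows; the `models` namespace on the package root and the base64 encoding of `searchablePdf` are assumptions based on the usual OCI TypeScript SDK layout, not something this diff shows.

```ts
import { writeFileSync } from "fs";
import * as aivision from "oci-aivision";

// Sketch: reading the AnalyzeDocumentResult fields documented above.
function summarizeDocumentResult(result: aivision.models.AnalyzeDocumentResult): void {
  console.log(`pages analyzed: ${result.pages.length}`);
  console.log(`text detection model: ${result.textDetectionModelVersion ?? "n/a"}`);
  for (const err of result.errors ?? []) {
    console.warn("processing error:", err);
  }
  // Assumption: searchablePdf holds base64-encoded PDF bytes.
  if (result.searchablePdf) {
    writeFileSync("searchable.pdf", Buffer.from(result.searchablePdf, "base64"));
  }
}
```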
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,16 +12,16 @@
 */
 import * as model from "../model";
 /**
- *
+ * The details of how to analyze an image.
 */
 export interface AnalyzeImageDetails {
 /**
- *
+ * The types of image analysis.
 */
 "features": Array<model.ImageFeature>;
 "image": model.ObjectStorageImageDetails | model.InlineImageDetails;
 /**
- * The
+ * The OCID of the compartment that calls the API.
 */
 "compartmentId"?: string;
 }
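`AnalyzeImageDetails` is the request body for synchronous image analysis. The sketch below shows one way it might be assembled and sent; the `AIServiceVisionClient` constructor, the config-file auth provider, the `featureType` and `source` discriminator strings, and the response shape are assumptions drawn from common OCI SDK conventions rather than from this diff.

```ts
import * as common from "oci-common";
import * as aivision from "oci-aivision";

// Sketch of a synchronous analyzeImage call built from the fields shown above.
async function analyzeOneImage(): Promise<void> {
  const provider = new common.ConfigFileAuthenticationDetailsProvider();
  const client = new aivision.AIServiceVisionClient({ authenticationDetailsProvider: provider });

  const details: aivision.models.AnalyzeImageDetails = {
    compartmentId: "ocid1.compartment.oc1..example", // placeholder OCID
    features: [
      // featureType values and maxResults are assumed enum/field names.
      { featureType: "OBJECT_DETECTION", maxResults: 10 } as aivision.models.ImageObjectDetectionFeature,
      { featureType: "IMAGE_CLASSIFICATION" } as aivision.models.ImageClassificationFeature
    ],
    image: {
      source: "OBJECT_STORAGE", // assumed discriminator for ObjectStorageImageDetails
      namespaceName: "my-namespace",
      bucketName: "my-bucket",
      objectName: "photo.jpg"
    } as aivision.models.ObjectStorageImageDetails
  };

  const response = await client.analyzeImage({ analyzeImageDetails: details });
  console.log(response.analyzeImageResult.labels);
}
```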
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,36 +12,36 @@
 */
 import * as model from "../model";
 /**
- *
+ * The image analysis results.
 */
 export interface AnalyzeImageResult {
 /**
- *
+ * The detected objects.
 */
 "imageObjects"?: Array<model.ImageObject>;
 /**
- *
+ * The image classification labels.
 */
 "labels"?: Array<model.Label>;
 /**
- * ontologyClasses of image labels.
+ * The ontologyClasses of image labels.
 */
 "ontologyClasses"?: Array<model.OntologyClass>;
 "imageText"?: model.ImageText;
 /**
- *
+ * The image classification model version.
 */
 "imageClassificationModelVersion"?: string;
 /**
- *
+ * The object detection model version.
 */
 "objectDetectionModelVersion"?: string;
 /**
- *
+ * The text detection model version.
 */
 "textDetectionModelVersion"?: string;
 /**
- *
+ * The errors encountered during image analysis.
 */
 "errors"?: Array<model.ProcessingError>;
 }
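A small sketch of walking the `AnalyzeImageResult` fields documented above. The `name` and `confidence` properties on `Label` and `ImageObject` are assumptions; this diff only shows the container arrays.

```ts
import * as aivision from "oci-aivision";

// Sketch: logging the result fields documented above.
function logImageResult(result: aivision.models.AnalyzeImageResult): void {
  for (const label of result.labels ?? []) {
    console.log(`label ${label.name}: ${label.confidence}`); // assumed fields
  }
  for (const obj of result.imageObjects ?? []) {
    console.log(`object ${obj.name}: ${obj.confidence}`); // assumed fields
  }
  if (result.errors?.length) {
    console.warn(`${result.errors.length} processing error(s) reported`);
  }
}
```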
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,12 +12,12 @@
 */
 import * as model from "../model";
 /**
- *
+ * The object-bounding polygon box.
 */
 export interface BoundingPolygon {
 /**
 * An array of normalized points defining the polygon's perimeter, with an implicit segment between subsequent points and between the first and last point.
- * Rectangles are defined with four points,
+ * Rectangles are defined with four points. For example, `[{\"x\": 0, \"y\": 0}, {\"x\": 1, \"y\": 0}, {\"x\": 1, \"y\": 0.5}, {\"x\": 0, \"y\": 0.5}]` represents the top half of an image.
 *
 */
 "normalizedVertices": Array<model.NormalizedVertex>;
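Since the polygon vertices are normalized, mapping them back to pixel coordinates is a simple scale by the image dimensions. A minimal sketch, assuming `NormalizedVertex` exposes `x` and `y` as fractions of the image size (consistent with the rectangle example in the doc comment above, though the fields themselves are not quoted in this diff):

```ts
import * as aivision from "oci-aivision";

// Sketch: converting a BoundingPolygon's normalized vertices to pixel coordinates.
function toPixelCoordinates(
  polygon: aivision.models.BoundingPolygon,
  imageWidth: number,
  imageHeight: number
): Array<{ x: number; y: number }> {
  return polygon.normalizedVertices.map(v => ({
    x: Math.round(v.x * imageWidth),   // v.x / v.y assumed to be 0..1 fractions
    y: Math.round(v.y * imageHeight)
  }));
}

// Example: the "top half of an image" rectangle above, on a 1000x800 image,
// maps to (0,0), (1000,0), (1000,400), (0,400).
```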
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

package/lib/model/cell.d.ts CHANGED

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -16,24 +16,24 @@ import * as model from "../model";
 */
 export interface Cell {
 /**
- *
+ * The text recognized in the cell.
 */
 "text": string;
 /**
- *
+ * The index of the cell inside the row. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
 */
 "rowIndex": number;
 /**
- *
+ * The index of the cell inside the column. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
 */
 "columnIndex": number;
 /**
- *
+ * The confidence score between 0 and 1. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
 */
 "confidence": number;
 "boundingPolygon": model.BoundingPolygon;
 /**
- *
+ * The words detected in the cell.
 */
 "wordIndexes": Array<number>;
 }
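Because each `Cell` carries its own `rowIndex` and `columnIndex`, recovering a plain row-by-row view of a detected table only needs a grouping pass. A minimal sketch using just the fields documented above (how cells are nested inside `Table`/`TableRow` is not shown in this diff):

```ts
import * as aivision from "oci-aivision";

// Sketch: reassembling detected table text from Cell positions.
function tableToRows(cells: aivision.models.Cell[]): string[][] {
  const rows: string[][] = [];
  for (const cell of cells) {
    const row = (rows[cell.rowIndex] ??= []);
    row[cell.columnIndex] = cell.text;
  }
  // Fill any gaps left by missing cells with empty strings.
  return Array.from(rows, row => Array.from(row ?? [], text => text ?? ""));
}
```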
package/lib/model/cell.js CHANGED

@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -11,7 +11,7 @@
 * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
 */
 /**
- *
+ * The compartment the model should be moved to.
 */
 export interface ChangeModelCompartmentDetails {
 /**

@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,25 +12,25 @@
 */
 import * as model from "../model";
 /**
- *
+ * The batch document analysis details.
 */
 export interface CreateDocumentJobDetails {
 "inputLocation": model.ObjectListInlineInputLocation;
 /**
- *
+ * The list of requested document analysis types.
 */
 "features": Array<model.DocumentFeature>;
 "outputLocation": model.OutputLocation;
 /**
- *
+ * The compartment identifier from the requester.
 */
 "compartmentId"?: string;
 /**
- *
+ * The document job display name.
 */
 "displayName"?: string;
 /**
- *
+ * The language of the document, abbreviated according to ISO 639-2.
 */
 "language"?: model.DocumentLanguage;
 /**

@@ -38,7 +38,7 @@ export interface CreateDocumentJobDetails {
 */
 "documentType"?: model.DocumentType;
 /**
- * Whether to generate a
+ * Whether or not to generate a ZIP file containing the results.
 */
 "isZipOutputEnabled"?: boolean;
 }
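`CreateDocumentJobDetails` is the payload for batch (asynchronous) document analysis. A sketch of submitting such a job follows; the `createDocumentJob` call shape, the `OBJECT_LIST_INLINE_INPUT_LOCATION` discriminator, the `featureType` values, and the `OutputLocation` field names are assumptions based on typical OCI SDK conventions, since only the details interface itself is quoted in this diff.

```ts
import * as common from "oci-common";
import * as aivision from "oci-aivision";

// Sketch: submitting a batch document job using the fields documented above.
async function submitDocumentJob(): Promise<void> {
  const provider = new common.ConfigFileAuthenticationDetailsProvider();
  const client = new aivision.AIServiceVisionClient({ authenticationDetailsProvider: provider });

  const details: aivision.models.CreateDocumentJobDetails = {
    compartmentId: "ocid1.compartment.oc1..example", // placeholder OCID
    displayName: "invoice-batch-1",
    isZipOutputEnabled: true,
    features: [
      { featureType: "TEXT_DETECTION" } as aivision.models.DocumentTextDetectionFeature,   // assumed value
      { featureType: "TABLE_DETECTION" } as aivision.models.DocumentTableDetectionFeature  // assumed value
    ],
    inputLocation: {
      sourceType: "OBJECT_LIST_INLINE_INPUT_LOCATION", // assumed discriminator
      objectLocations: [
        { namespaceName: "my-namespace", bucketName: "docs-in", objectName: "invoice-001.pdf" }
      ]
    } as aivision.models.ObjectListInlineInputLocation,
    // OutputLocation field names (namespaceName/bucketName/prefix) are assumed.
    outputLocation: { namespaceName: "my-namespace", bucketName: "docs-out", prefix: "results/" }
  };

  const response = await client.createDocumentJob({ createDocumentJobDetails: details });
  console.log("document job id:", response.documentJob.id);
}
```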
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,25 +12,25 @@
 */
 import * as model from "../model";
 /**
- *
+ * The details of the batch image analysis.
 */
 export interface CreateImageJobDetails {
 "inputLocation": model.ObjectListInlineInputLocation;
 /**
- *
+ * The list of requested image analysis types.
 */
 "features": Array<model.ImageFeature>;
 "outputLocation": model.OutputLocation;
 /**
- *
+ * The compartment identifier from the requester.
 */
 "compartmentId"?: string;
 /**
- *
+ * The image job display name.
 */
 "displayName"?: string;
 /**
- * Whether to generate a
+ * Whether or not to generate a ZIP file containing the results.
 */
 "isZipOutputEnabled"?: boolean;
 }

@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,47 +12,47 @@
 */
 import * as model from "../model";
 /**
- *
+ * The information needed to create a new model.
 */
 export interface CreateModelDetails {
 /**
- *
+ * A human-friendly name for the model, which can be changed.
 */
 "displayName"?: string;
 /**
- *
+ * An optional description of the model.
 */
 "description"?: string;
 /**
- *
+ * The model version
 */
 "modelVersion"?: string;
 /**
- *
+ * Which type of Vision model this is.
 */
 "modelType": string;
 /**
- *
+ * The compartment identifier.
 */
 "compartmentId": string;
 /**
- * Set to true when experimenting with a new model type or dataset so model training is quick, with a predefined low number of passes through the training data.
+ * Set to true when experimenting with a new model type or dataset, so the model training is quick, with a predefined low number of passes through the training data.
 */
 "isQuickMode"?: boolean;
 /**
- *
+ * The maximum model training duration in hours, expressed as a decimal fraction. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
 */
 "maxTrainingDurationInHours"?: number;
 "trainingDataset": model.DataScienceLabelingDataset | model.ObjectStorageDataset;
 "testingDataset"?: model.DataScienceLabelingDataset | model.ObjectStorageDataset;
 "validationDataset"?: model.DataScienceLabelingDataset | model.ObjectStorageDataset;
 /**
- * The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project
+ * The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project that contains the model.
 */
 "projectId": string;
 /**
- *
- *
+ * A simple key-value pair that is applied without any predefined name, type, or scope. It exists for cross-compatibility only.
+ * For example: `{\"bar-key\": \"value\"}`
 *
 */
 "freeformTags"?: {

@@ -60,7 +60,7 @@ export interface CreateModelDetails {
 };
 /**
 * Defined tags for this resource. Each key is predefined and scoped to a namespace.
- *
+ * For example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
 *
 */
 "definedTags"?: {
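`CreateModelDetails` covers custom model training: the dataset unions, quick mode, and the training-duration cap. A sketch of a training request built from the fields documented above; the `createModel` call, the `OBJECT_STORAGE` dataset discriminator and its field names, the `IMAGE_CLASSIFICATION` modelType value, and the response shape are assumptions not confirmed by this diff.

```ts
import * as common from "oci-common";
import * as aivision from "oci-aivision";

// Sketch: training a custom Vision model from an Object Storage dataset.
async function trainCustomModel(): Promise<void> {
  const provider = new common.ConfigFileAuthenticationDetailsProvider();
  const client = new aivision.AIServiceVisionClient({ authenticationDetailsProvider: provider });

  const details: aivision.models.CreateModelDetails = {
    displayName: "shelf-item-classifier",
    description: "Classifies retail shelf photos",
    modelType: "IMAGE_CLASSIFICATION",              // assumed enum value
    compartmentId: "ocid1.compartment.oc1..example", // placeholder OCID
    projectId: "ocid1.aivisionproject.oc1..example", // placeholder OCID
    isQuickMode: true,                               // quick training pass, per the doc comment above
    maxTrainingDurationInHours: 0.5,
    trainingDataset: {
      datasetType: "OBJECT_STORAGE",                 // assumed discriminator
      namespaceName: "my-namespace",
      bucketName: "training-data",
      objectName: "labels.jsonl"
    } as aivision.models.ObjectStorageDataset,
    freeformTags: { "bar-key": "value" }
  };

  const response = await client.createModel({ createModelDetails: details });
  console.log("model lifecycle state:", response.model.lifecycleState);
}
```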
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -11,24 +11,24 @@
 * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
 */
 /**
- *
+ * The information needed to create a new project.
 */
 export interface CreateProjectDetails {
 /**
- *
+ * A human-friendly name for the project, that can be changed.
 */
 "displayName"?: string;
 /**
- *
+ * An optional description of the project.
 */
 "description"?: string;
 /**
- *
+ * The compartment identifier.
 */
 "compartmentId": string;
 /**
- *
- *
+ * A simple key-value pair that is applied without any predefined name, type, or scope. It exists for cross-compatibility only.
+ * For example: `{\"bar-key\": \"value\"}`
 *
 */
 "freeformTags"?: {

@@ -36,7 +36,7 @@ export interface CreateProjectDetails {
 };
 /**
 * Defined tags for this resource. Each key is predefined and scoped to a namespace.
- *
+ * For example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
 *
 */
 "definedTags"?: {
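The new tag examples in the doc comments above translate directly into the payload shapes. A minimal sketch of a `CreateProjectDetails` value using them; the surrounding client call is not shown in this diff and would follow the same pattern as the other sketches.

```ts
import * as aivision from "oci-aivision";

// Sketch: a project creation payload with the tag shapes documented above.
const projectDetails: aivision.models.CreateProjectDetails = {
  displayName: "vision-experiments",
  description: "Project for Vision model experiments",
  compartmentId: "ocid1.compartment.oc1..example",          // placeholder OCID
  freeformTags: { "bar-key": "value" },                       // {"bar-key": "value"}
  definedTags: { "foo-namespace": { "bar-key": "value" } }    // {"foo-namespace": {"bar-key": "value"}}
};
```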
@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

@@ -12,7 +12,7 @@
 */
 import * as model from "../model";
 /**
- *
+ * The dataset created by the Data Labeling Service.
 */
 export interface DataScienceLabelingDataset extends model.Dataset {
 /**

@@ -1,7 +1,7 @@
 "use strict";
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *

package/lib/model/dataset.d.ts CHANGED

@@ -1,6 +1,6 @@
 /**
- *
- *
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
 * OpenAPI spec version: 20220125
 *
 *