oci-aivision 2.20.0 → 2.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (234)
  1. package/index.d.ts +2 -2
  2. package/index.js +2 -2
  3. package/lib/aiservicevision-waiter.d.ts +2 -2
  4. package/lib/aiservicevision-waiter.js +2 -2
  5. package/lib/client.d.ts +46 -45
  6. package/lib/client.js +51 -46
  7. package/lib/client.js.map +1 -1
  8. package/lib/model/action-type.d.ts +2 -2
  9. package/lib/model/action-type.js +2 -2
  10. package/lib/model/analyze-document-details.d.ts +6 -6
  11. package/lib/model/analyze-document-details.js +2 -2
  12. package/lib/model/analyze-document-result.d.ts +11 -11
  13. package/lib/model/analyze-document-result.js +2 -2
  14. package/lib/model/analyze-image-details.d.ts +5 -5
  15. package/lib/model/analyze-image-details.js +2 -2
  16. package/lib/model/analyze-image-result.d.ts +10 -10
  17. package/lib/model/analyze-image-result.js +2 -2
  18. package/lib/model/bounding-polygon.d.ts +4 -4
  19. package/lib/model/bounding-polygon.js +2 -2
  20. package/lib/model/cell.d.ts +7 -7
  21. package/lib/model/cell.js +2 -2
  22. package/lib/model/change-model-compartment-details.d.ts +3 -3
  23. package/lib/model/change-model-compartment-details.js +2 -2
  24. package/lib/model/change-project-compartment-details.d.ts +2 -2
  25. package/lib/model/change-project-compartment-details.js +2 -2
  26. package/lib/model/create-document-job-details.d.ts +8 -8
  27. package/lib/model/create-document-job-details.js +2 -2
  28. package/lib/model/create-image-job-details.d.ts +7 -7
  29. package/lib/model/create-image-job-details.js +2 -2
  30. package/lib/model/create-model-details.d.ts +14 -14
  31. package/lib/model/create-model-details.js +2 -2
  32. package/lib/model/create-project-details.d.ts +9 -9
  33. package/lib/model/create-project-details.js +2 -2
  34. package/lib/model/data-science-labeling-dataset.d.ts +3 -3
  35. package/lib/model/data-science-labeling-dataset.js +2 -2
  36. package/lib/model/dataset.d.ts +2 -2
  37. package/lib/model/dataset.js +2 -2
  38. package/lib/model/detected-document-type.d.ts +5 -5
  39. package/lib/model/detected-document-type.js +2 -2
  40. package/lib/model/detected-language.d.ts +5 -5
  41. package/lib/model/detected-language.js +2 -2
  42. package/lib/model/dimensions.d.ts +6 -6
  43. package/lib/model/dimensions.js +2 -2
  44. package/lib/model/document-classification-feature.d.ts +4 -4
  45. package/lib/model/document-classification-feature.js +2 -2
  46. package/lib/model/document-details.d.ts +3 -3
  47. package/lib/model/document-details.js +2 -2
  48. package/lib/model/document-feature.d.ts +3 -3
  49. package/lib/model/document-feature.js +2 -2
  50. package/lib/model/document-field.d.ts +3 -3
  51. package/lib/model/document-field.js +2 -2
  52. package/lib/model/document-job.d.ts +14 -14
  53. package/lib/model/document-job.js +2 -2
  54. package/lib/model/document-key-value-detection-feature.d.ts +2 -2
  55. package/lib/model/document-key-value-detection-feature.js +2 -2
  56. package/lib/model/document-language-classification-feature.d.ts +2 -2
  57. package/lib/model/document-language-classification-feature.js +2 -2
  58. package/lib/model/document-language.d.ts +3 -3
  59. package/lib/model/document-language.js +3 -3
  60. package/lib/model/document-metadata.d.ts +5 -5
  61. package/lib/model/document-metadata.js +2 -2
  62. package/lib/model/document-table-detection-feature.d.ts +2 -2
  63. package/lib/model/document-table-detection-feature.js +2 -2
  64. package/lib/model/document-text-detection-feature.d.ts +3 -3
  65. package/lib/model/document-text-detection-feature.js +2 -2
  66. package/lib/model/document-type.d.ts +3 -3
  67. package/lib/model/document-type.js +3 -3
  68. package/lib/model/field-label.d.ts +5 -5
  69. package/lib/model/field-label.js +2 -2
  70. package/lib/model/field-name.d.ts +6 -6
  71. package/lib/model/field-name.js +2 -2
  72. package/lib/model/field-value.d.ts +6 -6
  73. package/lib/model/field-value.js +2 -2
  74. package/lib/model/image-classification-feature.d.ts +4 -4
  75. package/lib/model/image-classification-feature.js +2 -2
  76. package/lib/model/image-details.d.ts +3 -3
  77. package/lib/model/image-details.js +2 -2
  78. package/lib/model/image-feature.d.ts +3 -3
  79. package/lib/model/image-feature.js +2 -2
  80. package/lib/model/image-job.d.ts +12 -12
  81. package/lib/model/image-job.js +2 -2
  82. package/lib/model/image-object-detection-feature.d.ts +4 -4
  83. package/lib/model/image-object-detection-feature.js +2 -2
  84. package/lib/model/image-object.d.ts +5 -5
  85. package/lib/model/image-object.js +2 -2
  86. package/lib/model/image-text-detection-feature.d.ts +4 -4
  87. package/lib/model/image-text-detection-feature.js +2 -2
  88. package/lib/model/image-text.d.ts +5 -5
  89. package/lib/model/image-text.js +2 -2
  90. package/lib/model/index.d.ts +2 -2
  91. package/lib/model/index.js +2 -2
  92. package/lib/model/inline-document-details.d.ts +4 -4
  93. package/lib/model/inline-document-details.js +2 -2
  94. package/lib/model/inline-image-details.d.ts +4 -4
  95. package/lib/model/inline-image-details.js +2 -2
  96. package/lib/model/input-location.d.ts +3 -3
  97. package/lib/model/input-location.js +2 -2
  98. package/lib/model/label.d.ts +5 -5
  99. package/lib/model/label.js +2 -2
  100. package/lib/model/line.d.ts +6 -6
  101. package/lib/model/line.js +2 -2
  102. package/lib/model/model-collection.d.ts +4 -4
  103. package/lib/model/model-collection.js +2 -2
  104. package/lib/model/model-summary.d.ts +14 -14
  105. package/lib/model/model-summary.js +2 -2
  106. package/lib/model/model.d.ts +22 -22
  107. package/lib/model/model.js +2 -2
  108. package/lib/model/normalized-vertex.d.ts +4 -4
  109. package/lib/model/normalized-vertex.js +2 -2
  110. package/lib/model/object-list-inline-input-location.d.ts +3 -3
  111. package/lib/model/object-list-inline-input-location.js +2 -2
  112. package/lib/model/object-location.d.ts +5 -5
  113. package/lib/model/object-location.js +2 -2
  114. package/lib/model/object-storage-dataset.d.ts +3 -3
  115. package/lib/model/object-storage-dataset.js +2 -2
  116. package/lib/model/object-storage-document-details.d.ts +5 -5
  117. package/lib/model/object-storage-document-details.js +2 -2
  118. package/lib/model/object-storage-image-details.d.ts +6 -6
  119. package/lib/model/object-storage-image-details.js +2 -2
  120. package/lib/model/ontology-class.d.ts +5 -5
  121. package/lib/model/ontology-class.js +2 -2
  122. package/lib/model/operation-status.d.ts +3 -3
  123. package/lib/model/operation-status.js +3 -3
  124. package/lib/model/operation-type.d.ts +2 -2
  125. package/lib/model/operation-type.js +2 -2
  126. package/lib/model/output-location.d.ts +6 -6
  127. package/lib/model/output-location.js +2 -2
  128. package/lib/model/page.d.ts +7 -7
  129. package/lib/model/page.js +2 -2
  130. package/lib/model/processing-error.d.ts +5 -5
  131. package/lib/model/processing-error.js +2 -2
  132. package/lib/model/project-collection.d.ts +3 -3
  133. package/lib/model/project-collection.js +2 -2
  134. package/lib/model/project-summary.d.ts +11 -11
  135. package/lib/model/project-summary.js +2 -2
  136. package/lib/model/project.d.ts +12 -12
  137. package/lib/model/project.js +2 -2
  138. package/lib/model/sort-order.d.ts +2 -2
  139. package/lib/model/sort-order.js +2 -2
  140. package/lib/model/table-row.d.ts +3 -3
  141. package/lib/model/table-row.js +2 -2
  142. package/lib/model/table.d.ts +9 -9
  143. package/lib/model/table.js +2 -2
  144. package/lib/model/update-model-details.d.ts +8 -8
  145. package/lib/model/update-model-details.js +2 -2
  146. package/lib/model/update-project-details.d.ts +8 -8
  147. package/lib/model/update-project-details.js +2 -2
  148. package/lib/model/value-array.d.ts +3 -3
  149. package/lib/model/value-array.js +2 -2
  150. package/lib/model/value-date.d.ts +4 -4
  151. package/lib/model/value-date.js +2 -2
  152. package/lib/model/value-integer.d.ts +4 -4
  153. package/lib/model/value-integer.js +2 -2
  154. package/lib/model/value-number.d.ts +4 -4
  155. package/lib/model/value-number.js +2 -2
  156. package/lib/model/value-phone-number.d.ts +4 -4
  157. package/lib/model/value-phone-number.js +2 -2
  158. package/lib/model/value-string.d.ts +4 -4
  159. package/lib/model/value-string.js +2 -2
  160. package/lib/model/value-time.d.ts +4 -4
  161. package/lib/model/value-time.js +2 -2
  162. package/lib/model/word.d.ts +4 -4
  163. package/lib/model/word.js +2 -2
  164. package/lib/model/work-request-error-collection.d.ts +4 -4
  165. package/lib/model/work-request-error-collection.js +2 -2
  166. package/lib/model/work-request-error.d.ts +3 -3
  167. package/lib/model/work-request-error.js +2 -2
  168. package/lib/model/work-request-log-entry-collection.d.ts +4 -4
  169. package/lib/model/work-request-log-entry-collection.js +2 -2
  170. package/lib/model/work-request-log-entry.d.ts +3 -3
  171. package/lib/model/work-request-log-entry.js +2 -2
  172. package/lib/model/work-request-resource.d.ts +6 -6
  173. package/lib/model/work-request-resource.js +2 -2
  174. package/lib/model/work-request-summary-collection.d.ts +4 -4
  175. package/lib/model/work-request-summary-collection.js +2 -2
  176. package/lib/model/work-request-summary.d.ts +9 -9
  177. package/lib/model/work-request-summary.js +2 -2
  178. package/lib/model/work-request.d.ts +7 -7
  179. package/lib/model/work-request.js +2 -2
  180. package/lib/request/analyze-document-request.d.ts +2 -2
  181. package/lib/request/analyze-image-request.d.ts +1 -1
  182. package/lib/request/cancel-document-job-request.d.ts +1 -1
  183. package/lib/request/cancel-image-job-request.d.ts +1 -1
  184. package/lib/request/cancel-work-request-request.d.ts +1 -1
  185. package/lib/request/change-model-compartment-request.d.ts +3 -3
  186. package/lib/request/change-project-compartment-request.d.ts +2 -2
  187. package/lib/request/create-document-job-request.d.ts +3 -3
  188. package/lib/request/create-image-job-request.d.ts +3 -3
  189. package/lib/request/create-model-request.d.ts +3 -3
  190. package/lib/request/create-project-request.d.ts +3 -3
  191. package/lib/request/delete-model-request.d.ts +2 -2
  192. package/lib/request/delete-project-request.d.ts +2 -2
  193. package/lib/request/get-document-job-request.d.ts +1 -1
  194. package/lib/request/get-image-job-request.d.ts +1 -1
  195. package/lib/request/get-model-request.d.ts +2 -2
  196. package/lib/request/get-project-request.d.ts +2 -2
  197. package/lib/request/get-work-request-request.d.ts +1 -1
  198. package/lib/request/index.d.ts +2 -2
  199. package/lib/request/index.js +2 -2
  200. package/lib/request/list-models-request.d.ts +4 -4
  201. package/lib/request/list-projects-request.d.ts +4 -4
  202. package/lib/request/list-work-request-errors-request.d.ts +2 -2
  203. package/lib/request/list-work-request-logs-request.d.ts +2 -2
  204. package/lib/request/list-work-requests-request.d.ts +3 -3
  205. package/lib/request/update-model-request.d.ts +3 -3
  206. package/lib/request/update-project-request.d.ts +3 -3
  207. package/lib/response/analyze-document-response.d.ts +1 -1
  208. package/lib/response/analyze-image-response.d.ts +1 -1
  209. package/lib/response/cancel-document-job-response.d.ts +1 -1
  210. package/lib/response/cancel-image-job-response.d.ts +1 -1
  211. package/lib/response/cancel-work-request-response.d.ts +1 -1
  212. package/lib/response/change-model-compartment-response.d.ts +1 -1
  213. package/lib/response/change-project-compartment-response.d.ts +1 -1
  214. package/lib/response/create-document-job-response.d.ts +1 -1
  215. package/lib/response/create-image-job-response.d.ts +1 -1
  216. package/lib/response/create-model-response.d.ts +2 -2
  217. package/lib/response/create-project-response.d.ts +2 -2
  218. package/lib/response/delete-model-response.d.ts +2 -2
  219. package/lib/response/delete-project-response.d.ts +2 -2
  220. package/lib/response/get-document-job-response.d.ts +1 -1
  221. package/lib/response/get-image-job-response.d.ts +1 -1
  222. package/lib/response/get-model-response.d.ts +1 -1
  223. package/lib/response/get-project-response.d.ts +1 -1
  224. package/lib/response/get-work-request-response.d.ts +1 -1
  225. package/lib/response/index.d.ts +2 -2
  226. package/lib/response/index.js +2 -2
  227. package/lib/response/list-models-response.d.ts +1 -1
  228. package/lib/response/list-projects-response.d.ts +1 -1
  229. package/lib/response/list-work-request-errors-response.d.ts +1 -1
  230. package/lib/response/list-work-request-logs-response.d.ts +1 -1
  231. package/lib/response/list-work-requests-response.d.ts +1 -1
  232. package/lib/response/update-model-response.d.ts +2 -2
  233. package/lib/response/update-project-response.d.ts +2 -2
  234. package/package.json +3 -3
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -12,11 +12,11 @@
  */
  import * as model from "../model";
  /**
- * Results of a model search.
+ * The results of a model search.
  */
  export interface ModelCollection {
  /**
- * List of models.
+ * A list of models.
  */
  "items": Array<model.ModelSummary>;
  }
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -12,23 +12,23 @@
  */
  import * as model from "../model";
  /**
- * Metadata about the model.
+ * The metadata about the model.
  */
  export interface ModelSummary {
  /**
- * Unique identifier that is immutable after creation.
+ * A unique identifier that is immutable after creation.
  */
  "id": string;
  /**
- * Human-friendly name for the model, which can be changed.
+ * A human-friendly name for the model, which can be changed.
  */
  "displayName"?: string;
  /**
- * Optional description of the model.
+ * An optional description of the model.
  */
  "description"?: string;
  /**
- * Compartment identifier.
+ * The compartment identifier.
  */
  "compartmentId": string;
  /**
@@ -40,7 +40,7 @@ export interface ModelSummary {
  */
  "modelVersion": string;
  /**
- * The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project which contains the model.
+ * The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project that contains the model.
  */
  "projectId": string;
  /**
@@ -56,19 +56,19 @@ export interface ModelSummary {
  */
  "lifecycleState": string;
  /**
- * A message describing the current state in more detail which can provide actionable information if training failed.
+ * A message describing the current state in more detail, that can provide actionable information if training failed.
  */
  "lifecycleDetails"?: string;
  /**
- * Precision of the trained model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The precision of the trained model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "precision"?: number;
  "trainingDataset"?: model.DataScienceLabelingDataset | model.ObjectStorageDataset;
  "testingDataset"?: model.DataScienceLabelingDataset | model.ObjectStorageDataset;
  "validationDataset"?: model.DataScienceLabelingDataset | model.ObjectStorageDataset;
  /**
- * Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
- * Example: `{\"bar-key\": \"value\"}`
+ * A simple key-value pair that is applied without any predefined name, type, or scope. It exists for cross-compatibility only.
+ * For example: `{\"bar-key\": \"value\"}`
  *
  */
  "freeformTags"?: {
@@ -76,7 +76,7 @@ export interface ModelSummary {
  };
  /**
  * Defined tags for this resource. Each key is predefined and scoped to a namespace.
- * Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
+ * For example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
  *
  */
  "definedTags"?: {
@@ -86,7 +86,7 @@ export interface ModelSummary {
  };
  /**
  * Usage of system tag keys. These predefined keys are scoped to namespaces.
- * Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
+ * For example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
  *
  */
  "systemTags"?: {
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -16,19 +16,19 @@ import * as model from "../model";
  */
  export interface Model {
  /**
- * Unique identifier that is immutable after creation.
+ * A unique identifier that is immutable after creation.
  */
  "id": string;
  /**
- * Human-friendly name for the model, which can be changed.
+ * A human-friendly name for the model, which can be changed.
  */
  "displayName"?: string;
  /**
- * Optional description of the model.
+ * An optional description of the model.
  */
  "description"?: string;
  /**
- * Compartment identifier.
+ * The compartment identifier.
  */
  "compartmentId": string;
  /**
@@ -36,15 +36,15 @@ export interface Model {
  */
  "modelType": Model.ModelType;
  /**
- * Set to true when experimenting with a new model type or dataset so model training is quick, with a predefined low number of passes through the training data.
+ * Set to true when experimenting with a new model type or dataset, so model training is quick, with a predefined low number of passes through the training data.
  */
  "isQuickMode"?: boolean;
  /**
- * Maximum model training duration in hours, expressed as a decimal fraction. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The maximum model training duration in hours, expressed as a decimal fraction. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "maxTrainingDurationInHours"?: number;
  /**
- * Total hours actually used for model training. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The total hours actually used for model training. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "trainedDurationInHours"?: number;
  "trainingDataset": model.DataScienceLabelingDataset | model.ObjectStorageDataset;
@@ -55,7 +55,7 @@ export interface Model {
  */
  "modelVersion": string;
  /**
- * The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project which contains the model.
+ * The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the project that contains the model.
  */
  "projectId": string;
  /**
@@ -67,15 +67,15 @@ export interface Model {
  */
  "timeUpdated"?: Date;
  /**
- * Current state of the model.
+ * The current state of the model.
  */
  "lifecycleState": Model.LifecycleState;
  /**
- * A message describing the current state in more detail which can provide actionable information if training failed.
+ * A message describing the current state in more detail, that can provide actionable information if training failed.
  */
  "lifecycleDetails"?: string;
  /**
- * Precision of the trained model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The precision of the trained model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "precision"?: number;
  /**
@@ -83,28 +83,28 @@ export interface Model {
  */
  "recall"?: number;
  /**
- * Mean average precision of the trained model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The mean average precision of the trained model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "averagePrecision"?: number;
  /**
- * Intersection over union threshold used for calculating precision and recall. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The intersection over the union threshold used for calculating precision and recall. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "confidenceThreshold"?: number;
  /**
- * Number of images in the dataset used to train, validate, and test the model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The number of images in the dataset used to train, validate, and test the model. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "totalImageCount"?: number;
  /**
- * Number of images set aside for evaluating model performance metrics after training. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The number of images set aside for evaluating model performance metrics after training. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "testImageCount"?: number;
  /**
- * Complete set of per-label metrics for successfully trained model.
+ * The complete set of per-label metrics for successfully trained models.
  */
  "metrics"?: string;
  /**
- * Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
- * Example: `{\"bar-key\": \"value\"}`
+ * A simple key-value pair that is applied without any predefined name, type, or scope. It exists for cross-compatibility only.
+ * For example: `{\"bar-key\": \"value\"}`
  *
  */
  "freeformTags"?: {
@@ -112,7 +112,7 @@ export interface Model {
  };
  /**
  * Defined tags for this resource. Each key is predefined and scoped to a namespace.
- * Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
+ * For example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
  *
  */
  "definedTags"?: {
@@ -122,7 +122,7 @@ export interface Model {
  };
  /**
  * Usage of system tag keys. These predefined keys are scoped to namespaces.
- * Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
+ * For example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
  *
  */
  "systemTags"?: {
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -18,11 +18,11 @@
  */
  export interface NormalizedVertex {
  /**
- * X axis normalized coordinate. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The X-axis normalized coordinate. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "x": number;
  /**
- * Y axis normalized coordinate. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+ * The Y-axis normalized coordinate. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
  */
  "y": number;
  }
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -16,7 +16,7 @@ import * as model from "../model";
  */
  export interface ObjectListInlineInputLocation extends model.InputLocation {
  /**
- * List of ObjectLocations.
+ * The list of ObjectLocations.
  */
  "objectLocations": Array<model.ObjectLocation>;
  "sourceType": string;
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -15,15 +15,15 @@
  */
  export interface ObjectLocation {
  /**
- * Object Storage namespace name.
+ * The Object Storage namespace name.
  */
  "namespaceName": string;
  /**
- * Object Storage bucket name.
+ * The Object Storage bucket name.
  */
  "bucketName": string;
  /**
- * Object Storage object name.
+ * The Object Storage object name.
  */
  "objectName": string;
  }
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -12,7 +12,7 @@
  */
  import * as model from "../model";
  /**
- * Dataset that resides in OCI Object Storage.
+ * The dataset that resides in Object Storage.
  */
  export interface ObjectStorageDataset extends model.Dataset {
  /**
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -16,15 +16,15 @@ import * as model from "../model";
  */
  export interface ObjectStorageDocumentDetails extends model.DocumentDetails {
  /**
- * Object Storage namespace.
+ * The Object Storage namespace.
  */
  "namespaceName": string;
  /**
- * Object Storage bucket name.
+ * The Object Storage bucket name.
  */
  "bucketName": string;
  /**
- * Object Storage object name.
+ * The Object Storage object name.
  */
  "objectName": string;
  "source": string;
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -12,19 +12,19 @@
  */
  import * as model from "../model";
  /**
- * Image residing in OCI Object Storage.
+ * The image residing in OCI Object Storage.
  */
  export interface ObjectStorageImageDetails extends model.ImageDetails {
  /**
- * Object Storage namespace.
+ * The Object Storage namespace.
  */
  "namespaceName": string;
  /**
- * Object Storage bucket name.
+ * The Object Storage bucket name.
  */
  "bucketName": string;
  /**
- * Object Storage object name.
+ * The Object Storage object name.
  */
  "objectName": string;
  "source": string;
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -15,15 +15,15 @@
  */
  export interface OntologyClass {
  /**
- * Name of the label.
+ * The label name.
  */
  "name": string;
  /**
- * Parents of the label.
+ * The label parents.
  */
  "parentNames"?: Array<string>;
  /**
- * Synonyms of the label.
+ * The label synonyms.
  */
  "synonymNames"?: Array<string>;
  }
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -11,7 +11,7 @@
  * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
  */
  /**
- * Possible operation status.
+ * Possible operation statuses.
  **/
  export declare enum OperationStatus {
  Accepted = "ACCEPTED",
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -14,7 +14,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.OperationStatus = void 0;
  /**
- * Possible operation status.
+ * Possible operation statuses.
  **/
  var OperationStatus;
  (function (OperationStatus) {
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,7 +1,7 @@
  "use strict";
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -1,6 +1,6 @@
  /**
- * VisionService API
- * A description of the VisionService API.
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
  * OpenAPI spec version: 20220125
  *
  *
@@ -11,19 +11,19 @@
  * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
  */
  /**
- * OCI Object Storage Location.
+ * The Object Storage Location.
  */
  export interface OutputLocation {
  /**
- * Object Storage namespace.
+ * The Object Storage namespace.
  */
  "namespaceName": string;
  /**
- * Object Storage bucket name.
+ * The Object Storage bucket name.
  */
  "bucketName": string;
  /**
- * Object Storage folder name.
+ * The Object Storage folder name.
  */
  "prefix": string;
  }
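
For orientation, the updated package description above explains what the Vision client is for. The sketch below shows roughly how the types touched in this diff (AnalyzeImageDetails, ImageClassificationFeature, ObjectStorageImageDetails) fit together in a single synchronous image-classification call. It is a minimal sketch, assuming the usual OCI TypeScript SDK conventions (AIServiceVisionClient from this package, ConfigFileAuthenticationDetailsProvider from oci-common, the models namespace, and labels on AnalyzeImageResult); the compartment OCID, namespace, bucket, and object names are placeholders, not values taken from this diff.

import * as aivision from "oci-aivision";
import * as common from "oci-common";

// Minimal sketch: classify one image stored in Object Storage.
// Assumes config-file auth (~/.oci/config); all IDs below are placeholders.
async function classifyImage(): Promise<void> {
  const provider = new common.ConfigFileAuthenticationDetailsProvider();
  const client = new aivision.AIServiceVisionClient({
    authenticationDetailsProvider: provider
  });

  // AnalyzeImageDetails combines one or more features with an image source.
  const analyzeImageDetails: aivision.models.AnalyzeImageDetails = {
    compartmentId: "ocid1.compartment.oc1..exampleuniqueid", // placeholder
    features: [
      {
        featureType: "IMAGE_CLASSIFICATION",
        maxResults: 5
      } as aivision.models.ImageClassificationFeature
    ],
    // ObjectStorageImageDetails reads the image from Object Storage;
    // InlineImageDetails would take base64-encoded data instead.
    image: {
      source: "OBJECT_STORAGE",
      namespaceName: "my-namespace",
      bucketName: "my-bucket",
      objectName: "photo.jpg"
    } as aivision.models.ObjectStorageImageDetails
  };

  const response = await client.analyzeImage({ analyzeImageDetails });
  // Classification results are expected as labels on the AnalyzeImageResult.
  for (const label of response.analyzeImageResult.labels ?? []) {
    console.log(`${label.name}: ${label.confidence}`);
  }
}

classifyImage().catch(console.error);

For batch work, the same feature and input-location models feed createImageJob / createDocumentJob with an OutputLocation, as the asynchronous counterpart to the call above.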