oci-aivision 2.99.0 → 2.100.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/aiservicevision-waiter.d.ts +8 -0
- package/lib/aiservicevision-waiter.js +12 -0
- package/lib/aiservicevision-waiter.js.map +1 -1
- package/lib/client.d.ts +35 -0
- package/lib/client.js +195 -0
- package/lib/client.js.map +1 -1
- package/lib/model/analyze-video-result.d.ts +71 -0
- package/lib/model/analyze-video-result.js +127 -0
- package/lib/model/analyze-video-result.js.map +1 -0
- package/lib/model/create-video-job-details.d.ts +54 -0
- package/lib/model/create-video-job-details.js +73 -0
- package/lib/model/create-video-job-details.js.map +1 -0
- package/lib/model/index.d.ts +54 -0
- package/lib/model/index.js +57 -2
- package/lib/model/index.js.map +1 -1
- package/lib/model/object-property.d.ts +31 -0
- package/lib/model/object-property.js +57 -0
- package/lib/model/object-property.js.map +1 -0
- package/lib/model/video-face-detection-feature.d.ts +38 -0
- package/lib/model/video-face-detection-feature.js +54 -0
- package/lib/model/video-face-detection-feature.js.map +1 -0
- package/lib/model/video-face-frame.d.ts +39 -0
- package/lib/model/video-face-frame.js +67 -0
- package/lib/model/video-face-frame.js.map +1 -0
- package/lib/model/video-face-segment.d.ts +31 -0
- package/lib/model/video-face-segment.js +67 -0
- package/lib/model/video-face-segment.js.map +1 -0
- package/lib/model/video-face.d.ts +26 -0
- package/lib/model/video-face.js +61 -0
- package/lib/model/video-face.js.map +1 -0
- package/lib/model/video-feature.d.ts +22 -0
- package/lib/model/video-feature.js +84 -0
- package/lib/model/video-feature.js.map +1 -0
- package/lib/model/video-job.d.ts +114 -0
- package/lib/model/video-job.js +97 -0
- package/lib/model/video-job.js.map +1 -0
- package/lib/model/video-label-detection-feature.d.ts +38 -0
- package/lib/model/video-label-detection-feature.js +54 -0
- package/lib/model/video-label-detection-feature.js.map +1 -0
- package/lib/model/video-label-segment.d.ts +27 -0
- package/lib/model/video-label-segment.js +57 -0
- package/lib/model/video-label-segment.js.map +1 -0
- package/lib/model/video-label.d.ts +30 -0
- package/lib/model/video-label.js +61 -0
- package/lib/model/video-label.js.map +1 -0
- package/lib/model/video-metadata.d.ts +37 -0
- package/lib/model/video-metadata.js +29 -0
- package/lib/model/video-metadata.js.map +1 -0
- package/lib/model/video-object-detection-feature.d.ts +38 -0
- package/lib/model/video-object-detection-feature.js +54 -0
- package/lib/model/video-object-detection-feature.js.map +1 -0
- package/lib/model/video-object-frame.d.ts +31 -0
- package/lib/model/video-object-frame.js +57 -0
- package/lib/model/video-object-frame.js.map +1 -0
- package/lib/model/video-object-segment.d.ts +31 -0
- package/lib/model/video-object-segment.js +67 -0
- package/lib/model/video-object-segment.js.map +1 -0
- package/lib/model/video-object-tracking-feature.d.ts +38 -0
- package/lib/model/video-object-tracking-feature.js +54 -0
- package/lib/model/video-object-tracking-feature.js.map +1 -0
- package/lib/model/video-object.d.ts +30 -0
- package/lib/model/video-object.js +61 -0
- package/lib/model/video-object.js.map +1 -0
- package/lib/model/video-segment.d.ts +29 -0
- package/lib/model/video-segment.js +29 -0
- package/lib/model/video-segment.js.map +1 -0
- package/lib/model/video-text-detection-feature.d.ts +30 -0
- package/lib/model/video-text-detection-feature.js +54 -0
- package/lib/model/video-text-detection-feature.js.map +1 -0
- package/lib/model/video-text-frame.d.ts +31 -0
- package/lib/model/video-text-frame.js +57 -0
- package/lib/model/video-text-frame.js.map +1 -0
- package/lib/model/video-text-segment.d.ts +31 -0
- package/lib/model/video-text-segment.js +67 -0
- package/lib/model/video-text-segment.js.map +1 -0
- package/lib/model/video-text.d.ts +30 -0
- package/lib/model/video-text.js +61 -0
- package/lib/model/video-text.js.map +1 -0
- package/lib/model/video-tracked-object-properties.d.ts +25 -0
- package/lib/model/video-tracked-object-properties.js +29 -0
- package/lib/model/video-tracked-object-properties.js.map +1 -0
- package/lib/model/video-tracked-object-segment.d.ts +31 -0
- package/lib/model/video-tracked-object-segment.js +67 -0
- package/lib/model/video-tracked-object-segment.js.map +1 -0
- package/lib/model/video-tracked-object.d.ts +35 -0
- package/lib/model/video-tracked-object.js +67 -0
- package/lib/model/video-tracked-object.js.map +1 -0
- package/lib/model/video-tracking-frame.d.ts +35 -0
- package/lib/model/video-tracking-frame.js +67 -0
- package/lib/model/video-tracking-frame.js.map +1 -0
- package/lib/request/cancel-video-job-request.d.ts +35 -0
- package/lib/request/cancel-video-job-request.js +15 -0
- package/lib/request/cancel-video-job-request.js.map +1 -0
- package/lib/request/create-video-job-request.d.ts +36 -0
- package/lib/request/create-video-job-request.js +15 -0
- package/lib/request/create-video-job-request.js.map +1 -0
- package/lib/request/get-video-job-request.d.ts +26 -0
- package/lib/request/get-video-job-request.js +15 -0
- package/lib/request/get-video-job-request.js.map +1 -0
- package/lib/request/index.d.ts +6 -0
- package/lib/request/index.js.map +1 -1
- package/lib/response/cancel-video-job-response.d.ts +20 -0
- package/lib/response/cancel-video-job-response.js +15 -0
- package/lib/response/cancel-video-job-response.js.map +1 -0
- package/lib/response/create-video-job-response.d.ts +30 -0
- package/lib/response/create-video-job-response.js +15 -0
- package/lib/response/create-video-job-response.js.map +1 -0
- package/lib/response/get-video-job-response.d.ts +30 -0
- package/lib/response/get-video-job-response.js +15 -0
- package/lib/response/get-video-job-response.js.map +1 -0
- package/lib/response/index.d.ts +6 -0
- package/package.json +3 -3
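
The files above add asynchronous video analysis to the SDK: new Video* models, create/get/cancel video job requests and responses, and a video job waiter. As a rough, hedged sketch of how the new model types fit together (the `oci-aivision` import style and the `models` export are assumed from the usual OCI SDK package layout and are not shown in this diff), the discriminated feature objects declared in the hunks below could be assembled and serialized like this:

```typescript
// Illustrative sketch only; based on the model declarations added in this diff.
// Assumption: the new models are re-exported through the package's `models`
// namespace, as in other OCI SDK service packages.
import * as aivision from "oci-aivision";

// The featureType values "OBJECT_TRACKING" and "TEXT_DETECTION" and the
// minConfidence/maxResults/modelId fields come from the .d.ts hunks below.
const features: aivision.models.VideoFeature[] = [
  {
    featureType: "OBJECT_TRACKING",
    minConfidence: 0.6, // results scored below 0.6 are not returned
    maxResults: 10 // per-frame cap on returned results
  } as aivision.models.VideoObjectTrackingFeature,
  {
    featureType: "TEXT_DETECTION",
    minConfidence: 0.8
  } as aivision.models.VideoTextDetectionFeature
];

// Each new model namespace also ships getJsonObj/getDeserializedJsonObj helpers:
const wire = aivision.models.VideoObjectTrackingFeature.getJsonObj(
  features[0] as aivision.models.VideoObjectTrackingFeature
);
console.log(JSON.stringify(wire));
```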

package/lib/model/video-object-frame.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"video-object-frame.js","sourceRoot":"","sources":["../../../../../lib/aivision/lib/model/video-object-frame.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;GAWG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAkBlC,IAAiB,gBAAgB,CAyBhC;AAzBD,WAAiB,gBAAgB;IAC/B,SAAgB,UAAU,CAAC,GAAqB;QAC9C,MAAM,OAAO,mCACR,GAAG,GACH;YACD,iBAAiB,EAAE,GAAG,CAAC,eAAe;gBACpC,CAAC,CAAC,KAAK,CAAC,eAAe,CAAC,UAAU,CAAC,GAAG,CAAC,eAAe,CAAC;gBACvD,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAXe,2BAAU,aAWzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAqB;QAC1D,MAAM,OAAO,mCACR,GAAG,GACH;YACD,iBAAiB,EAAE,GAAG,CAAC,eAAe;gBACpC,CAAC,CAAC,KAAK,CAAC,eAAe,CAAC,sBAAsB,CAAC,GAAG,CAAC,eAAe,CAAC;gBACnE,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAXe,uCAAsB,yBAWrC,CAAA;AACH,CAAC,EAzBgB,gBAAgB,GAAhB,wBAAgB,KAAhB,wBAAgB,QAyBhC"}

package/lib/model/video-object-segment.d.ts
@@ -0,0 +1,31 @@
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * An object segment in a video.
+ */
+export interface VideoObjectSegment {
+  "videoSegment": model.VideoSegment;
+  /**
+   * The confidence score, between 0 and 1. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "confidence": number;
+  /**
+   * Object frame in a segment.
+   */
+  "frames": Array<model.VideoObjectFrame>;
+}
+export declare namespace VideoObjectSegment {
+  function getJsonObj(obj: VideoObjectSegment): object;
+  function getDeserializedJsonObj(obj: VideoObjectSegment): object;
+}

package/lib/model/video-object-segment.js
@@ -0,0 +1,67 @@
+"use strict";
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+  Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+  o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+  if (mod && mod.__esModule) return mod;
+  var result = {};
+  if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+  __setModuleDefault(result, mod);
+  return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VideoObjectSegment = void 0;
+const model = __importStar(require("../model"));
+var VideoObjectSegment;
+(function (VideoObjectSegment) {
+  function getJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "videoSegment": obj.videoSegment
+        ? model.VideoSegment.getJsonObj(obj.videoSegment)
+        : undefined,
+      "frames": obj.frames
+        ? obj.frames.map(item => {
+          return model.VideoObjectFrame.getJsonObj(item);
+        })
+        : undefined
+    });
+    return jsonObj;
+  }
+  VideoObjectSegment.getJsonObj = getJsonObj;
+  function getDeserializedJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "videoSegment": obj.videoSegment
+        ? model.VideoSegment.getDeserializedJsonObj(obj.videoSegment)
+        : undefined,
+      "frames": obj.frames
+        ? obj.frames.map(item => {
+          return model.VideoObjectFrame.getDeserializedJsonObj(item);
+        })
+        : undefined
+    });
+    return jsonObj;
+  }
+  VideoObjectSegment.getDeserializedJsonObj = getDeserializedJsonObj;
+})(VideoObjectSegment = exports.VideoObjectSegment || (exports.VideoObjectSegment = {}));
+//# sourceMappingURL=video-object-segment.js.map

package/lib/model/video-object-segment.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"video-object-segment.js","sourceRoot":"","sources":["../../../../../lib/aivision/lib/model/video-object-segment.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;GAWG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAkBlC,IAAiB,kBAAkB,CAqClC;AArCD,WAAiB,kBAAkB;IACjC,SAAgB,UAAU,CAAC,GAAuB;QAChD,MAAM,OAAO,mCACR,GAAG,GACH;YACD,cAAc,EAAE,GAAG,CAAC,YAAY;gBAC9B,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,UAAU,CAAC,GAAG,CAAC,YAAY,CAAC;gBACjD,CAAC,CAAC,SAAS;YAEb,QAAQ,EAAE,GAAG,CAAC,MAAM;gBAClB,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACpB,OAAO,KAAK,CAAC,gBAAgB,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBACjD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAjBe,6BAAU,aAiBzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAuB;QAC5D,MAAM,OAAO,mCACR,GAAG,GACH;YACD,cAAc,EAAE,GAAG,CAAC,YAAY;gBAC9B,CAAC,CAAC,KAAK,CAAC,YAAY,CAAC,sBAAsB,CAAC,GAAG,CAAC,YAAY,CAAC;gBAC7D,CAAC,CAAC,SAAS;YAEb,QAAQ,EAAE,GAAG,CAAC,MAAM;gBAClB,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACpB,OAAO,KAAK,CAAC,gBAAgB,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBAC7D,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAjBe,yCAAsB,yBAiBrC,CAAA;AACH,CAAC,EArCgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAqClC"}

package/lib/model/video-object-tracking-feature.d.ts
@@ -0,0 +1,38 @@
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * Video object tracking feature
+ */
+export interface VideoObjectTrackingFeature extends model.VideoFeature {
+  /**
+   * The minimum confidence score, between 0 and 1,
+   * when the value is set, results with lower confidence will not be returned.
+   * Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "minConfidence"?: number;
+  /**
+   * The maximum number of results per frame to return. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "maxResults"?: number;
+  /**
+   * The custom model ID.
+   */
+  "modelId"?: string;
+  "featureType": string;
+}
+export declare namespace VideoObjectTrackingFeature {
+  function getJsonObj(obj: VideoObjectTrackingFeature, isParentJsonObj?: boolean): object;
+  const featureType = "OBJECT_TRACKING";
+  function getDeserializedJsonObj(obj: VideoObjectTrackingFeature, isParentJsonObj?: boolean): object;
+}

package/lib/model/video-object-tracking-feature.js
@@ -0,0 +1,54 @@
+"use strict";
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+  Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+  o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+  if (mod && mod.__esModule) return mod;
+  var result = {};
+  if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+  __setModuleDefault(result, mod);
+  return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VideoObjectTrackingFeature = void 0;
+const model = __importStar(require("../model"));
+var VideoObjectTrackingFeature;
+(function (VideoObjectTrackingFeature) {
+  function getJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+      ? obj
+      : model.VideoFeature.getJsonObj(obj))), {});
+    return jsonObj;
+  }
+  VideoObjectTrackingFeature.getJsonObj = getJsonObj;
+  VideoObjectTrackingFeature.featureType = "OBJECT_TRACKING";
+  function getDeserializedJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+      ? obj
+      : model.VideoFeature.getDeserializedJsonObj(obj))), {});
+    return jsonObj;
+  }
+  VideoObjectTrackingFeature.getDeserializedJsonObj = getDeserializedJsonObj;
+})(VideoObjectTrackingFeature = exports.VideoObjectTrackingFeature || (exports.VideoObjectTrackingFeature = {}));
+//# sourceMappingURL=video-object-tracking-feature.js.map

package/lib/model/video-object-tracking-feature.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"video-object-tracking-feature.js","sourceRoot":"","sources":["../../../../../lib/aivision/lib/model/video-object-tracking-feature.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;GAWG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAyBlC,IAAiB,0BAA0B,CAyB1C;AAzBD,WAAiB,0BAA0B;IACzC,SAAgB,UAAU,CAAC,GAA+B,EAAE,eAAyB;QACnF,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,YAAY,CAAC,UAAU,CAAC,GAAG,CAAgC,CAAC,GACpE,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IATe,qCAAU,aASzB,CAAA;IACY,sCAAW,GAAG,iBAAiB,CAAC;IAC7C,SAAgB,sBAAsB,CACpC,GAA+B,EAC/B,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,YAAY,CAAC,sBAAsB,CAAC,GAAG,CAAgC,CAAC,GAChF,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,iDAAsB,yBAYrC,CAAA;AACH,CAAC,EAzBgB,0BAA0B,GAA1B,kCAA0B,KAA1B,kCAA0B,QAyB1C"}

package/lib/model/video-object.d.ts
@@ -0,0 +1,30 @@
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * Detected object in a video.
+ */
+export interface VideoObject {
+  /**
+   * Detected object name.
+   */
+  "name": string;
+  /**
+   * Object segments in a video.
+   */
+  "segments": Array<model.VideoObjectSegment>;
+}
+export declare namespace VideoObject {
+  function getJsonObj(obj: VideoObject): object;
+  function getDeserializedJsonObj(obj: VideoObject): object;
+}

package/lib/model/video-object.js
@@ -0,0 +1,61 @@
+"use strict";
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+  Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+  o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+  if (mod && mod.__esModule) return mod;
+  var result = {};
+  if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+  __setModuleDefault(result, mod);
+  return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VideoObject = void 0;
+const model = __importStar(require("../model"));
+var VideoObject;
+(function (VideoObject) {
+  function getJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "segments": obj.segments
+        ? obj.segments.map(item => {
+          return model.VideoObjectSegment.getJsonObj(item);
+        })
+        : undefined
+    });
+    return jsonObj;
+  }
+  VideoObject.getJsonObj = getJsonObj;
+  function getDeserializedJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "segments": obj.segments
+        ? obj.segments.map(item => {
+          return model.VideoObjectSegment.getDeserializedJsonObj(item);
+        })
+        : undefined
+    });
+    return jsonObj;
+  }
+  VideoObject.getDeserializedJsonObj = getDeserializedJsonObj;
+})(VideoObject = exports.VideoObject || (exports.VideoObject = {}));
+//# sourceMappingURL=video-object.js.map

package/lib/model/video-object.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"video-object.js","sourceRoot":"","sources":["../../../../../lib/aivision/lib/model/video-object.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;GAWG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAiBlC,IAAiB,WAAW,CA6B3B;AA7BD,WAAiB,WAAW;IAC1B,SAAgB,UAAU,CAAC,GAAgB;QACzC,MAAM,OAAO,mCACR,GAAG,GACH;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,kBAAkB,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;gBACnD,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,sBAAU,aAazB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAgB;QACrD,MAAM,OAAO,mCACR,GAAG,GACH;YACD,UAAU,EAAE,GAAG,CAAC,QAAQ;gBACtB,CAAC,CAAC,GAAG,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE;oBACtB,OAAO,KAAK,CAAC,kBAAkB,CAAC,sBAAsB,CAAC,IAAI,CAAC,CAAC;gBAC/D,CAAC,CAAC;gBACJ,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAbe,kCAAsB,yBAarC,CAAA;AACH,CAAC,EA7BgB,WAAW,GAAX,mBAAW,KAAX,mBAAW,QA6B3B"}

package/lib/model/video-segment.d.ts
@@ -0,0 +1,29 @@
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+/**
+ * A sequence of frames that was (or appears to be) continuously captured for a label/object/text?.
+ */
+export interface VideoSegment {
+  /**
+   * Video start time offset(Milliseconds). Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "startTimeOffsetMs": number;
+  /**
+   * Video end time offset(Milliseconds). Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "endTimeOffsetMs": number;
+}
+export declare namespace VideoSegment {
+  function getJsonObj(obj: VideoSegment): object;
+  function getDeserializedJsonObj(obj: VideoSegment): object;
+}

package/lib/model/video-segment.js
@@ -0,0 +1,29 @@
+"use strict";
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VideoSegment = void 0;
+var VideoSegment;
+(function (VideoSegment) {
+  function getJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {});
+    return jsonObj;
+  }
+  VideoSegment.getJsonObj = getJsonObj;
+  function getDeserializedJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {});
+    return jsonObj;
+  }
+  VideoSegment.getDeserializedJsonObj = getDeserializedJsonObj;
+})(VideoSegment = exports.VideoSegment || (exports.VideoSegment = {}));
+//# sourceMappingURL=video-segment.js.map

package/lib/model/video-segment.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"video-segment.js","sourceRoot":"","sources":["../../../../../lib/aivision/lib/model/video-segment.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;GAWG;;;AAmBH,IAAiB,YAAY,CAW5B;AAXD,WAAiB,YAAY;IAC3B,SAAgB,UAAU,CAAC,GAAiB;QAC1C,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,uBAAU,aAIzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAiB;QACtD,MAAM,OAAO,mCAAQ,GAAG,GAAK,EAAE,CAAE,CAAC;QAElC,OAAO,OAAO,CAAC;IACjB,CAAC;IAJe,mCAAsB,yBAIrC,CAAA;AACH,CAAC,EAXgB,YAAY,GAAZ,oBAAY,KAAZ,oBAAY,QAW5B"}

package/lib/model/video-text-detection-feature.d.ts
@@ -0,0 +1,30 @@
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * Video text detection feature
+ */
+export interface VideoTextDetectionFeature extends model.VideoFeature {
+  /**
+   * The minimum confidence score, between 0 and 1,
+   * when the value is set, results with lower confidence will not be returned.
+   * Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "minConfidence"?: number;
+  "featureType": string;
+}
+export declare namespace VideoTextDetectionFeature {
+  function getJsonObj(obj: VideoTextDetectionFeature, isParentJsonObj?: boolean): object;
+  const featureType = "TEXT_DETECTION";
+  function getDeserializedJsonObj(obj: VideoTextDetectionFeature, isParentJsonObj?: boolean): object;
+}

package/lib/model/video-text-detection-feature.js
@@ -0,0 +1,54 @@
+"use strict";
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+  Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+  o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+  if (mod && mod.__esModule) return mod;
+  var result = {};
+  if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+  __setModuleDefault(result, mod);
+  return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VideoTextDetectionFeature = void 0;
+const model = __importStar(require("../model"));
+var VideoTextDetectionFeature;
+(function (VideoTextDetectionFeature) {
+  function getJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+      ? obj
+      : model.VideoFeature.getJsonObj(obj))), {});
+    return jsonObj;
+  }
+  VideoTextDetectionFeature.getJsonObj = getJsonObj;
+  VideoTextDetectionFeature.featureType = "TEXT_DETECTION";
+  function getDeserializedJsonObj(obj, isParentJsonObj) {
+    const jsonObj = Object.assign(Object.assign({}, (isParentJsonObj
+      ? obj
+      : model.VideoFeature.getDeserializedJsonObj(obj))), {});
+    return jsonObj;
+  }
+  VideoTextDetectionFeature.getDeserializedJsonObj = getDeserializedJsonObj;
+})(VideoTextDetectionFeature = exports.VideoTextDetectionFeature || (exports.VideoTextDetectionFeature = {}));
+//# sourceMappingURL=video-text-detection-feature.js.map

package/lib/model/video-text-detection-feature.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"video-text-detection-feature.js","sourceRoot":"","sources":["../../../../../lib/aivision/lib/model/video-text-detection-feature.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;GAWG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAiBlC,IAAiB,yBAAyB,CAyBzC;AAzBD,WAAiB,yBAAyB;IACxC,SAAgB,UAAU,CAAC,GAA8B,EAAE,eAAyB;QAClF,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,YAAY,CAAC,UAAU,CAAC,GAAG,CAA+B,CAAC,GACnE,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IATe,oCAAU,aASzB,CAAA;IACY,qCAAW,GAAG,gBAAgB,CAAC;IAC5C,SAAgB,sBAAsB,CACpC,GAA8B,EAC9B,eAAyB;QAEzB,MAAM,OAAO,mCACR,CAAC,eAAe;YACjB,CAAC,CAAC,GAAG;YACL,CAAC,CAAE,KAAK,CAAC,YAAY,CAAC,sBAAsB,CAAC,GAAG,CAA+B,CAAC,GAC/E,EAAE,CACN,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAZe,gDAAsB,yBAYrC,CAAA;AACH,CAAC,EAzBgB,yBAAyB,GAAzB,iCAAyB,KAAzB,iCAAyB,QAyBzC"}

package/lib/model/video-text-frame.d.ts
@@ -0,0 +1,31 @@
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * A text frame.
+ */
+export interface VideoTextFrame {
+  /**
+   * Time offset(Milliseconds) in the video. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "timeOffsetMs": number;
+  /**
+   * The confidence score, between 0 and 1. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "confidence": number;
+  "boundingPolygon": model.BoundingPolygon;
+}
+export declare namespace VideoTextFrame {
+  function getJsonObj(obj: VideoTextFrame): object;
+  function getDeserializedJsonObj(obj: VideoTextFrame): object;
+}

package/lib/model/video-text-frame.js
@@ -0,0 +1,57 @@
+"use strict";
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
+}) : (function(o, m, k, k2) {
+  if (k2 === undefined) k2 = k;
+  o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+  Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+  o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+  if (mod && mod.__esModule) return mod;
+  var result = {};
+  if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+  __setModuleDefault(result, mod);
+  return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VideoTextFrame = void 0;
+const model = __importStar(require("../model"));
+var VideoTextFrame;
+(function (VideoTextFrame) {
+  function getJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "boundingPolygon": obj.boundingPolygon
+        ? model.BoundingPolygon.getJsonObj(obj.boundingPolygon)
+        : undefined
+    });
+    return jsonObj;
+  }
+  VideoTextFrame.getJsonObj = getJsonObj;
+  function getDeserializedJsonObj(obj) {
+    const jsonObj = Object.assign(Object.assign({}, obj), {
+      "boundingPolygon": obj.boundingPolygon
+        ? model.BoundingPolygon.getDeserializedJsonObj(obj.boundingPolygon)
+        : undefined
+    });
+    return jsonObj;
+  }
+  VideoTextFrame.getDeserializedJsonObj = getDeserializedJsonObj;
+})(VideoTextFrame = exports.VideoTextFrame || (exports.VideoTextFrame = {}));
+//# sourceMappingURL=video-text-frame.js.map

package/lib/model/video-text-frame.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"video-text-frame.js","sourceRoot":"","sources":["../../../../../lib/aivision/lib/model/video-text-frame.ts"],"names":[],"mappings":";AAAA;;;;;;;;;;;GAWG;;;;;;;;;;;;;;;;;;;;;;AAEH,gDAAkC;AAkBlC,IAAiB,cAAc,CAyB9B;AAzBD,WAAiB,cAAc;IAC7B,SAAgB,UAAU,CAAC,GAAmB;QAC5C,MAAM,OAAO,mCACR,GAAG,GACH;YACD,iBAAiB,EAAE,GAAG,CAAC,eAAe;gBACpC,CAAC,CAAC,KAAK,CAAC,eAAe,CAAC,UAAU,CAAC,GAAG,CAAC,eAAe,CAAC;gBACvD,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAXe,yBAAU,aAWzB,CAAA;IACD,SAAgB,sBAAsB,CAAC,GAAmB;QACxD,MAAM,OAAO,mCACR,GAAG,GACH;YACD,iBAAiB,EAAE,GAAG,CAAC,eAAe;gBACpC,CAAC,CAAC,KAAK,CAAC,eAAe,CAAC,sBAAsB,CAAC,GAAG,CAAC,eAAe,CAAC;gBACnE,CAAC,CAAC,SAAS;SACd,CACF,CAAC;QAEF,OAAO,OAAO,CAAC;IACjB,CAAC;IAXe,qCAAsB,yBAWrC,CAAA;AACH,CAAC,EAzBgB,cAAc,GAAd,sBAAc,KAAd,sBAAc,QAyB9B"}

package/lib/model/video-text-segment.d.ts
@@ -0,0 +1,31 @@
+/**
+ * Vision API
+ * Using Vision, you can upload images to detect and classify objects in them. If you have lots of images, you can process them in batch using asynchronous API endpoints. Vision's features are thematically split between Document AI for document-centric images, and Image Analysis for object and scene-based images. Pretrained models and custom models are supported.
+ * OpenAPI spec version: 20220125
+ *
+ *
+ * NOTE: This class is auto generated by OracleSDKGenerator.
+ * Do not edit the class manually.
+ *
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
+ */
+import * as model from "../model";
+/**
+ * A text segment in a video.
+ */
+export interface VideoTextSegment {
+  "videoSegment": model.VideoSegment;
+  /**
+   * The confidence score, between 0 and 1. Note: Numbers greater than Number.MAX_SAFE_INTEGER will result in rounding issues.
+   */
+  "confidence": number;
+  /**
+   * Text frame in a segment.
+   */
+  "frames": Array<model.VideoTextFrame>;
+}
+export declare namespace VideoTextSegment {
+  function getJsonObj(obj: VideoTextSegment): object;
+  function getDeserializedJsonObj(obj: VideoTextSegment): object;
+}
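
The segment and frame declarations above describe the read side of the new API: each segment carries a VideoSegment with start and end offsets in milliseconds, a confidence score, and per-frame detail. A minimal consumption sketch, under the same assumed `oci-aivision`/`models` import as earlier and using only field names that appear in the .d.ts hunks above:

```typescript
// Sketch: summarizing a VideoTextSegment from an analyze-video result.
import * as aivision from "oci-aivision";

function summarizeTextSegment(segment: aivision.models.VideoTextSegment): string {
  // VideoSegment carries millisecond offsets for the span of the detection.
  const { startTimeOffsetMs, endTimeOffsetMs } = segment.videoSegment;
  // Each VideoTextFrame pins the detection to a specific time offset.
  const frameTimes = segment.frames.map(frame => frame.timeOffsetMs).join("ms, ");
  return (
    `text visible from ${startTimeOffsetMs}ms to ${endTimeOffsetMs}ms ` +
    `(confidence ${segment.confidence.toFixed(2)}); frames at ${frameTimes}ms`
  );
}
```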