@alicloud/quanmiaolightapp20240801 2.11.0 → 2.12.0
This diff shows the publicly available contents of the two package versions as released to their registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- package/dist/client.d.ts +116 -0
- package/dist/client.js +453 -0
- package/dist/client.js.map +1 -1
- package/dist/models/GetVideoDetectShotConfigResponse.d.ts +19 -0
- package/dist/models/GetVideoDetectShotConfigResponse.js +69 -0
- package/dist/models/GetVideoDetectShotConfigResponse.js.map +1 -0
- package/dist/models/GetVideoDetectShotConfigResponseBody.d.ts +62 -0
- package/dist/models/GetVideoDetectShotConfigResponseBody.js +90 -0
- package/dist/models/GetVideoDetectShotConfigResponseBody.js.map +1 -0
- package/dist/models/GetVideoDetectShotTaskRequest.d.ts +21 -0
- package/dist/models/GetVideoDetectShotTaskRequest.js +58 -0
- package/dist/models/GetVideoDetectShotTaskRequest.js.map +1 -0
- package/dist/models/GetVideoDetectShotTaskResponse.d.ts +19 -0
- package/dist/models/GetVideoDetectShotTaskResponse.js +69 -0
- package/dist/models/GetVideoDetectShotTaskResponse.js.map +1 -0
- package/dist/models/GetVideoDetectShotTaskResponseBody.d.ts +258 -0
- package/dist/models/GetVideoDetectShotTaskResponseBody.js +291 -0
- package/dist/models/GetVideoDetectShotTaskResponseBody.js.map +1 -0
- package/dist/models/RunVideoAnalysisRequest.d.ts +5 -0
- package/dist/models/RunVideoAnalysisRequest.js +2 -0
- package/dist/models/RunVideoAnalysisRequest.js.map +1 -1
- package/dist/models/RunVideoAnalysisShrinkRequest.d.ts +5 -0
- package/dist/models/RunVideoAnalysisShrinkRequest.js +2 -0
- package/dist/models/RunVideoAnalysisShrinkRequest.js.map +1 -1
- package/dist/models/RunVideoDetectShotRequest.d.ts +86 -0
- package/dist/models/RunVideoDetectShotRequest.js +90 -0
- package/dist/models/RunVideoDetectShotRequest.js.map +1 -0
- package/dist/models/RunVideoDetectShotResponse.d.ts +19 -0
- package/dist/models/RunVideoDetectShotResponse.js +69 -0
- package/dist/models/RunVideoDetectShotResponse.js.map +1 -0
- package/dist/models/RunVideoDetectShotResponseBody.d.ts +183 -0
- package/dist/models/RunVideoDetectShotResponseBody.js +229 -0
- package/dist/models/RunVideoDetectShotResponseBody.js.map +1 -0
- package/dist/models/RunVideoDetectShotShrinkRequest.d.ts +86 -0
- package/dist/models/RunVideoDetectShotShrinkRequest.js +84 -0
- package/dist/models/RunVideoDetectShotShrinkRequest.js.map +1 -0
- package/dist/models/SubmitVideoAnalysisTaskRequest.d.ts +5 -0
- package/dist/models/SubmitVideoAnalysisTaskRequest.js +2 -0
- package/dist/models/SubmitVideoAnalysisTaskRequest.js.map +1 -1
- package/dist/models/SubmitVideoAnalysisTaskShrinkRequest.d.ts +5 -0
- package/dist/models/SubmitVideoAnalysisTaskShrinkRequest.js +2 -0
- package/dist/models/SubmitVideoAnalysisTaskShrinkRequest.js.map +1 -1
- package/dist/models/SubmitVideoDetectShotTaskRequest.d.ts +83 -0
- package/dist/models/SubmitVideoDetectShotTaskRequest.js +92 -0
- package/dist/models/SubmitVideoDetectShotTaskRequest.js.map +1 -0
- package/dist/models/SubmitVideoDetectShotTaskResponse.d.ts +19 -0
- package/dist/models/SubmitVideoDetectShotTaskResponse.js +69 -0
- package/dist/models/SubmitVideoDetectShotTaskResponse.js.map +1 -0
- package/dist/models/SubmitVideoDetectShotTaskResponseBody.d.ts +59 -0
- package/dist/models/SubmitVideoDetectShotTaskResponseBody.js +90 -0
- package/dist/models/SubmitVideoDetectShotTaskResponseBody.js.map +1 -0
- package/dist/models/SubmitVideoDetectShotTaskShrinkRequest.d.ts +83 -0
- package/dist/models/SubmitVideoDetectShotTaskShrinkRequest.js +86 -0
- package/dist/models/SubmitVideoDetectShotTaskShrinkRequest.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotConfigRequest.d.ts +21 -0
- package/dist/models/UpdateVideoDetectShotConfigRequest.js +58 -0
- package/dist/models/UpdateVideoDetectShotConfigRequest.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotConfigResponse.d.ts +19 -0
- package/dist/models/UpdateVideoDetectShotConfigResponse.js +69 -0
- package/dist/models/UpdateVideoDetectShotConfigResponse.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotConfigResponseBody.d.ts +41 -0
- package/dist/models/UpdateVideoDetectShotConfigResponseBody.js +66 -0
- package/dist/models/UpdateVideoDetectShotConfigResponseBody.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotTaskRequest.d.ts +29 -0
- package/dist/models/UpdateVideoDetectShotTaskRequest.js +60 -0
- package/dist/models/UpdateVideoDetectShotTaskRequest.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotTaskResponse.d.ts +19 -0
- package/dist/models/UpdateVideoDetectShotTaskResponse.js +69 -0
- package/dist/models/UpdateVideoDetectShotTaskResponse.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotTaskResponseBody.d.ts +69 -0
- package/dist/models/UpdateVideoDetectShotTaskResponseBody.js +94 -0
- package/dist/models/UpdateVideoDetectShotTaskResponseBody.js.map +1 -0
- package/dist/models/model.d.ts +36 -0
- package/dist/models/model.js +79 -6
- package/dist/models/model.js.map +1 -1
- package/package.json +1 -1
- package/src/client.ts +512 -0
- package/src/models/GetVideoDetectShotConfigResponse.ts +40 -0
- package/src/models/GetVideoDetectShotConfigResponseBody.ts +98 -0
- package/src/models/GetVideoDetectShotTaskRequest.ts +34 -0
- package/src/models/GetVideoDetectShotTaskResponse.ts +40 -0
- package/src/models/GetVideoDetectShotTaskResponseBody.ts +430 -0
- package/src/models/RunVideoAnalysisRequest.ts +7 -0
- package/src/models/RunVideoAnalysisShrinkRequest.ts +7 -0
- package/src/models/RunVideoDetectShotRequest.ts +131 -0
- package/src/models/RunVideoDetectShotResponse.ts +40 -0
- package/src/models/RunVideoDetectShotResponseBody.ts +311 -0
- package/src/models/RunVideoDetectShotShrinkRequest.ts +125 -0
- package/src/models/SubmitVideoAnalysisTaskRequest.ts +7 -0
- package/src/models/SubmitVideoAnalysisTaskShrinkRequest.ts +7 -0
- package/src/models/SubmitVideoDetectShotTaskRequest.ts +130 -0
- package/src/models/SubmitVideoDetectShotTaskResponse.ts +40 -0
- package/src/models/SubmitVideoDetectShotTaskResponseBody.ts +95 -0
- package/src/models/SubmitVideoDetectShotTaskShrinkRequest.ts +124 -0
- package/src/models/UpdateVideoDetectShotConfigRequest.ts +34 -0
- package/src/models/UpdateVideoDetectShotConfigResponse.ts +40 -0
- package/src/models/UpdateVideoDetectShotConfigResponseBody.ts +62 -0
- package/src/models/UpdateVideoDetectShotTaskRequest.ts +44 -0
- package/src/models/UpdateVideoDetectShotTaskResponse.ts +40 -0
- package/src/models/UpdateVideoDetectShotTaskResponseBody.ts +109 -0
- package/src/models/model.ts +36 -0
package/dist/client.d.ts
CHANGED
```diff
@@ -185,6 +185,39 @@ export default class Client extends OpenApi {
      * @returns GetVideoAnalysisTaskResponse
      */
     getVideoAnalysisTask(workspaceId: string, request: $_model.GetVideoAnalysisTaskRequest): Promise<$_model.GetVideoAnalysisTaskResponse>;
+    /**
+     * Smart video splitting - Get configuration
+     *
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns GetVideoDetectShotConfigResponse
+     */
+    getVideoDetectShotConfigWithOptions(workspaceId: string, headers: {
+        [key: string]: string;
+    }, runtime: $dara.RuntimeOptions): Promise<$_model.GetVideoDetectShotConfigResponse>;
+    /**
+     * Smart video splitting - Get configuration
+     * @returns GetVideoDetectShotConfigResponse
+     */
+    getVideoDetectShotConfig(workspaceId: string): Promise<$_model.GetVideoDetectShotConfigResponse>;
+    /**
+     * Light app - Get the result of an async video splitting task
+     *
+     * @param request - GetVideoDetectShotTaskRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns GetVideoDetectShotTaskResponse
+     */
+    getVideoDetectShotTaskWithOptions(workspaceId: string, request: $_model.GetVideoDetectShotTaskRequest, headers: {
+        [key: string]: string;
+    }, runtime: $dara.RuntimeOptions): Promise<$_model.GetVideoDetectShotTaskResponse>;
+    /**
+     * Light app - Get the result of an async video splitting task
+     *
+     * @param request - GetVideoDetectShotTaskRequest
+     * @returns GetVideoDetectShotTaskResponse
+     */
+    getVideoDetectShotTask(workspaceId: string, request: $_model.GetVideoDetectShotTaskRequest): Promise<$_model.GetVideoDetectShotTaskResponse>;
     /**
      * Hot news recommendation
      *
@@ -674,6 +707,35 @@ export default class Client extends OpenApi {
      * @returns RunVideoAnalysisResponse
      */
     runVideoAnalysis(workspaceId: string, request: $_model.RunVideoAnalysisRequest): Promise<$_model.RunVideoAnalysisResponse>;
+    /**
+     * Light app - Video splitting
+     *
+     * @param tmpReq - RunVideoDetectShotRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns RunVideoDetectShotResponse
+     */
+    runVideoDetectShotWithSSE(workspaceId: string, tmpReq: $_model.RunVideoDetectShotRequest, headers: {
+        [key: string]: string;
+    }, runtime: $dara.RuntimeOptions): AsyncGenerator<$_model.RunVideoDetectShotResponse, any, unknown>;
+    /**
+     * Light app - Video splitting
+     *
+     * @param tmpReq - RunVideoDetectShotRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns RunVideoDetectShotResponse
+     */
+    runVideoDetectShotWithOptions(workspaceId: string, tmpReq: $_model.RunVideoDetectShotRequest, headers: {
+        [key: string]: string;
+    }, runtime: $dara.RuntimeOptions): Promise<$_model.RunVideoDetectShotResponse>;
+    /**
+     * Light app - Video splitting
+     *
+     * @param request - RunVideoDetectShotRequest
+     * @returns RunVideoDetectShotResponse
+     */
+    runVideoDetectShot(workspaceId: string, request: $_model.RunVideoDetectShotRequest): Promise<$_model.RunVideoDetectShotResponse>;
     /**
      * Submit an enterprise VOC async task
      *
@@ -746,6 +808,24 @@ export default class Client extends OpenApi {
      * @returns SubmitVideoAnalysisTaskResponse
      */
     submitVideoAnalysisTask(workspaceId: string, request: $_model.SubmitVideoAnalysisTaskRequest): Promise<$_model.SubmitVideoAnalysisTaskResponse>;
+    /**
+     * Light app - Submit a video splitting task
+     *
+     * @param tmpReq - SubmitVideoDetectShotTaskRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns SubmitVideoDetectShotTaskResponse
+     */
+    submitVideoDetectShotTaskWithOptions(workspaceId: string, tmpReq: $_model.SubmitVideoDetectShotTaskRequest, headers: {
+        [key: string]: string;
+    }, runtime: $dara.RuntimeOptions): Promise<$_model.SubmitVideoDetectShotTaskResponse>;
+    /**
+     * Light app - Submit a video splitting task
+     *
+     * @param request - SubmitVideoDetectShotTaskRequest
+     * @returns SubmitVideoDetectShotTaskResponse
+     */
+    submitVideoDetectShotTask(workspaceId: string, request: $_model.SubmitVideoDetectShotTaskRequest): Promise<$_model.SubmitVideoDetectShotTaskResponse>;
     /**
      * Video analysis - Update configuration
      *
@@ -800,4 +880,40 @@ export default class Client extends OpenApi {
      * @returns UpdateVideoAnalysisTasksResponse
      */
     updateVideoAnalysisTasks(workspaceId: string, request: $_model.UpdateVideoAnalysisTasksRequest): Promise<$_model.UpdateVideoAnalysisTasksResponse>;
+    /**
+     * Smart video splitting - Update configuration
+     *
+     * @param request - UpdateVideoDetectShotConfigRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns UpdateVideoDetectShotConfigResponse
+     */
+    updateVideoDetectShotConfigWithOptions(workspaceId: string, request: $_model.UpdateVideoDetectShotConfigRequest, headers: {
+        [key: string]: string;
+    }, runtime: $dara.RuntimeOptions): Promise<$_model.UpdateVideoDetectShotConfigResponse>;
+    /**
+     * Smart video splitting - Update configuration
+     *
+     * @param request - UpdateVideoDetectShotConfigRequest
+     * @returns UpdateVideoDetectShotConfigResponse
+     */
+    updateVideoDetectShotConfig(workspaceId: string, request: $_model.UpdateVideoDetectShotConfigRequest): Promise<$_model.UpdateVideoDetectShotConfigResponse>;
+    /**
+     * Video splitting - Update task status
+     *
+     * @param request - UpdateVideoDetectShotTaskRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns UpdateVideoDetectShotTaskResponse
+     */
+    updateVideoDetectShotTaskWithOptions(workspaceId: string, request: $_model.UpdateVideoDetectShotTaskRequest, headers: {
+        [key: string]: string;
+    }, runtime: $dara.RuntimeOptions): Promise<$_model.UpdateVideoDetectShotTaskResponse>;
+    /**
+     * Video splitting - Update task status
+     *
+     * @param request - UpdateVideoDetectShotTaskRequest
+     * @returns UpdateVideoDetectShotTaskResponse
+     */
+    updateVideoDetectShotTask(workspaceId: string, request: $_model.UpdateVideoDetectShotTaskRequest): Promise<$_model.UpdateVideoDetectShotTaskResponse>;
 }
```
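The new declarations are all workspace-scoped wrappers around one API family (RunVideoDetectShot / SubmitVideoDetectShotTask / GetVideoDetectShotTask / config and status updates). As a quick orientation, here is a minimal usage sketch for the synchronous call. It is not part of the package: the import paths, `Config` construction, endpoint, and example values are assumptions; only the method name, the workspace-scoped signature, and the `videoUrl`/`language` request fields come from the diff itself.

```ts
// Hedged sketch: bootstrap the client and run video splitting synchronously.
// ASSUMPTIONS: import paths, Config fields, endpoint, and all example values.
import Client from '@alicloud/quanmiaolightapp20240801';
// Deep path mirrors the dist layout shown in this diff; the package may also re-export models.
import * as $_model from '@alicloud/quanmiaolightapp20240801/dist/models/model';
import * as OpenApiCore from '@alicloud/openapi-core';

async function main(): Promise<void> {
  const client = new Client(new OpenApiCore.$OpenApiUtil.Config({
    accessKeyId: process.env.ALIBABA_CLOUD_ACCESS_KEY_ID,
    accessKeySecret: process.env.ALIBABA_CLOUD_ACCESS_KEY_SECRET,
    endpoint: 'quanmiaolightapp.cn-beijing.aliyuncs.com', // example endpoint
  }));

  // Synchronous (non-streaming) call; request fields match the body map in client.js below.
  const resp = await client.runVideoDetectShot('your-workspace-id',
    new $_model.RunVideoDetectShotRequest({
      videoUrl: 'https://example.com/sample.mp4',
      language: 'chinese', // field exists in the diff; accepted values are not listed there
    }));
  console.log(resp.body);
}

main().catch(console.error);
```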
package/dist/client.js
CHANGED
```diff
@@ -520,6 +520,81 @@ class Client extends openapi_core_1.default {
         let headers = {};
         return await this.getVideoAnalysisTaskWithOptions(workspaceId, request, headers, runtime);
     }
+    /**
+     * Smart video splitting - Get configuration
+     *
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns GetVideoDetectShotConfigResponse
+     */
+    async getVideoDetectShotConfigWithOptions(workspaceId, headers, runtime) {
+        let req = new openapi_core_2.$OpenApiUtil.OpenApiRequest({
+            headers: headers,
+        });
+        let params = new openapi_core_2.$OpenApiUtil.Params({
+            action: "GetVideoDetectShotConfig",
+            version: "2024-08-01",
+            protocol: "HTTPS",
+            pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/videoAnalysis/getVideoDetectShotConfig`,
+            method: "GET",
+            authType: "AK",
+            style: "ROA",
+            reqBodyType: "json",
+            bodyType: "json",
+        });
+        return $dara.cast(await this.callApi(params, req, runtime), new $_model.GetVideoDetectShotConfigResponse({}));
+    }
+    /**
+     * Smart video splitting - Get configuration
+     * @returns GetVideoDetectShotConfigResponse
+     */
+    async getVideoDetectShotConfig(workspaceId) {
+        let runtime = new $dara.RuntimeOptions({});
+        let headers = {};
+        return await this.getVideoDetectShotConfigWithOptions(workspaceId, headers, runtime);
+    }
+    /**
+     * Light app - Get the result of an async video splitting task
+     *
+     * @param request - GetVideoDetectShotTaskRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns GetVideoDetectShotTaskResponse
+     */
+    async getVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime) {
+        request.validate();
+        let query = {};
+        if (!$dara.isNull(request.taskId)) {
+            query["taskId"] = request.taskId;
+        }
+        let req = new openapi_core_2.$OpenApiUtil.OpenApiRequest({
+            headers: headers,
+            query: openapi_core_2.OpenApiUtil.query(query),
+        });
+        let params = new openapi_core_2.$OpenApiUtil.Params({
+            action: "GetVideoDetectShotTask",
+            version: "2024-08-01",
+            protocol: "HTTPS",
+            pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/getVideoDetectShotTask`,
+            method: "GET",
+            authType: "AK",
+            style: "ROA",
+            reqBodyType: "json",
+            bodyType: "json",
+        });
+        return $dara.cast(await this.callApi(params, req, runtime), new $_model.GetVideoDetectShotTaskResponse({}));
+    }
+    /**
+     * Light app - Get the result of an async video splitting task
+     *
+     * @param request - GetVideoDetectShotTaskRequest
+     * @returns GetVideoDetectShotTaskResponse
+     */
+    async getVideoDetectShotTask(workspaceId, request) {
+        let runtime = new $dara.RuntimeOptions({});
+        let headers = {};
+        return await this.getVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime);
+    }
     /**
      * Hot news recommendation
      *
```
```diff
@@ -2591,6 +2666,9 @@ class Client extends openapi_core_1.default {
         if (!$dara.isNull(request.splitInterval)) {
             body["splitInterval"] = request.splitInterval;
         }
+        if (!$dara.isNull(request.splitType)) {
+            body["splitType"] = request.splitType;
+        }
         if (!$dara.isNull(request.taskId)) {
             body["taskId"] = request.taskId;
         }
```
```diff
@@ -2723,6 +2801,9 @@ class Client extends openapi_core_1.default {
         if (!$dara.isNull(request.splitInterval)) {
             body["splitInterval"] = request.splitInterval;
         }
+        if (!$dara.isNull(request.splitType)) {
+            body["splitType"] = request.splitType;
+        }
         if (!$dara.isNull(request.taskId)) {
             body["taskId"] = request.taskId;
         }
```
```diff
@@ -2778,6 +2859,196 @@ class Client extends openapi_core_1.default {
         let headers = {};
         return await this.runVideoAnalysisWithOptions(workspaceId, request, headers, runtime);
     }
+    /**
+     * Light app - Video splitting
+     *
+     * @param tmpReq - RunVideoDetectShotRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns RunVideoDetectShotResponse
+     */
+    runVideoDetectShotWithSSE(workspaceId, tmpReq, headers, runtime) {
+        return __asyncGenerator(this, arguments, function* runVideoDetectShotWithSSE_1() {
+            var _a, e_16, _b, _c;
+            tmpReq.validate();
+            let request = new $_model.RunVideoDetectShotShrinkRequest({});
+            openapi_core_2.OpenApiUtil.convert(tmpReq, request);
+            if (!$dara.isNull(tmpReq.options)) {
+                request.optionsShrink = openapi_core_2.OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.options, "options", "json");
+            }
+            if (!$dara.isNull(tmpReq.recognitionOptions)) {
+                request.recognitionOptionsShrink = openapi_core_2.OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.recognitionOptions, "recognitionOptions", "json");
+            }
+            let body = {};
+            if (!$dara.isNull(request.intelliSimpPrompt)) {
+                body["intelliSimpPrompt"] = request.intelliSimpPrompt;
+            }
+            if (!$dara.isNull(request.intelliSimpPromptTemplateId)) {
+                body["intelliSimpPromptTemplateId"] = request.intelliSimpPromptTemplateId;
+            }
+            if (!$dara.isNull(request.language)) {
+                body["language"] = request.language;
+            }
+            if (!$dara.isNull(request.modelCustomPromptTemplateId)) {
+                body["modelCustomPromptTemplateId"] = request.modelCustomPromptTemplateId;
+            }
+            if (!$dara.isNull(request.modelId)) {
+                body["modelId"] = request.modelId;
+            }
+            if (!$dara.isNull(request.modelVlCustomPromptTemplateId)) {
+                body["modelVlCustomPromptTemplateId"] = request.modelVlCustomPromptTemplateId;
+            }
+            if (!$dara.isNull(request.optionsShrink)) {
+                body["options"] = request.optionsShrink;
+            }
+            if (!$dara.isNull(request.originalSessionId)) {
+                body["originalSessionId"] = request.originalSessionId;
+            }
+            if (!$dara.isNull(request.preModelId)) {
+                body["preModelId"] = request.preModelId;
+            }
+            if (!$dara.isNull(request.prompt)) {
+                body["prompt"] = request.prompt;
+            }
+            if (!$dara.isNull(request.recognitionOptionsShrink)) {
+                body["recognitionOptions"] = request.recognitionOptionsShrink;
+            }
+            if (!$dara.isNull(request.taskId)) {
+                body["taskId"] = request.taskId;
+            }
+            if (!$dara.isNull(request.videoUrl)) {
+                body["videoUrl"] = request.videoUrl;
+            }
+            if (!$dara.isNull(request.vlPrompt)) {
+                body["vlPrompt"] = request.vlPrompt;
+            }
+            let req = new openapi_core_2.$OpenApiUtil.OpenApiRequest({
+                headers: headers,
+                body: openapi_core_2.OpenApiUtil.parseToMap(body),
+            });
+            let params = new openapi_core_2.$OpenApiUtil.Params({
+                action: "RunVideoDetectShot",
+                version: "2024-08-01",
+                protocol: "HTTPS",
+                pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/runVideoDetectShot`,
+                method: "POST",
+                authType: "AK",
+                style: "ROA",
+                reqBodyType: "formData",
+                bodyType: "json",
+            });
+            let sseResp = yield __await(this.callSSEApi(params, req, runtime));
+            try {
+                for (var _d = true, sseResp_16 = __asyncValues(sseResp), sseResp_16_1; sseResp_16_1 = yield __await(sseResp_16.next()), _a = sseResp_16_1.done, !_a; _d = true) {
+                    _c = sseResp_16_1.value;
+                    _d = false;
+                    let resp = _c;
+                    let data = JSON.parse(resp.event.data);
+                    yield yield __await($dara.cast({
+                        statusCode: resp.statusCode,
+                        headers: resp.headers,
+                        body: Object.assign(Object.assign({}, data), { RequestId: resp.event.id, Message: resp.event.event }),
+                    }, new $_model.RunVideoDetectShotResponse({})));
+                }
+            }
+            catch (e_16_1) { e_16 = { error: e_16_1 }; }
+            finally {
+                try {
+                    if (!_d && !_a && (_b = sseResp_16.return)) yield __await(_b.call(sseResp_16));
+                }
+                finally { if (e_16) throw e_16.error; }
+            }
+        });
+    }
+    /**
+     * Light app - Video splitting
+     *
+     * @param tmpReq - RunVideoDetectShotRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns RunVideoDetectShotResponse
+     */
+    async runVideoDetectShotWithOptions(workspaceId, tmpReq, headers, runtime) {
+        tmpReq.validate();
+        let request = new $_model.RunVideoDetectShotShrinkRequest({});
+        openapi_core_2.OpenApiUtil.convert(tmpReq, request);
+        if (!$dara.isNull(tmpReq.options)) {
+            request.optionsShrink = openapi_core_2.OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.options, "options", "json");
+        }
+        if (!$dara.isNull(tmpReq.recognitionOptions)) {
+            request.recognitionOptionsShrink = openapi_core_2.OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.recognitionOptions, "recognitionOptions", "json");
+        }
+        let body = {};
+        if (!$dara.isNull(request.intelliSimpPrompt)) {
+            body["intelliSimpPrompt"] = request.intelliSimpPrompt;
+        }
+        if (!$dara.isNull(request.intelliSimpPromptTemplateId)) {
+            body["intelliSimpPromptTemplateId"] = request.intelliSimpPromptTemplateId;
+        }
+        if (!$dara.isNull(request.language)) {
+            body["language"] = request.language;
+        }
+        if (!$dara.isNull(request.modelCustomPromptTemplateId)) {
+            body["modelCustomPromptTemplateId"] = request.modelCustomPromptTemplateId;
+        }
+        if (!$dara.isNull(request.modelId)) {
+            body["modelId"] = request.modelId;
+        }
+        if (!$dara.isNull(request.modelVlCustomPromptTemplateId)) {
+            body["modelVlCustomPromptTemplateId"] = request.modelVlCustomPromptTemplateId;
+        }
+        if (!$dara.isNull(request.optionsShrink)) {
+            body["options"] = request.optionsShrink;
+        }
+        if (!$dara.isNull(request.originalSessionId)) {
+            body["originalSessionId"] = request.originalSessionId;
+        }
+        if (!$dara.isNull(request.preModelId)) {
+            body["preModelId"] = request.preModelId;
+        }
+        if (!$dara.isNull(request.prompt)) {
+            body["prompt"] = request.prompt;
+        }
+        if (!$dara.isNull(request.recognitionOptionsShrink)) {
+            body["recognitionOptions"] = request.recognitionOptionsShrink;
+        }
+        if (!$dara.isNull(request.taskId)) {
+            body["taskId"] = request.taskId;
+        }
+        if (!$dara.isNull(request.videoUrl)) {
+            body["videoUrl"] = request.videoUrl;
+        }
+        if (!$dara.isNull(request.vlPrompt)) {
+            body["vlPrompt"] = request.vlPrompt;
+        }
+        let req = new openapi_core_2.$OpenApiUtil.OpenApiRequest({
+            headers: headers,
+            body: openapi_core_2.OpenApiUtil.parseToMap(body),
+        });
+        let params = new openapi_core_2.$OpenApiUtil.Params({
+            action: "RunVideoDetectShot",
+            version: "2024-08-01",
+            protocol: "HTTPS",
+            pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/runVideoDetectShot`,
+            method: "POST",
+            authType: "AK",
+            style: "ROA",
+            reqBodyType: "formData",
+            bodyType: "json",
+        });
+        return $dara.cast(await this.callApi(params, req, runtime), new $_model.RunVideoDetectShotResponse({}));
+    }
+    /**
+     * Light app - Video splitting
+     *
+     * @param request - RunVideoDetectShotRequest
+     * @returns RunVideoDetectShotResponse
+     */
+    async runVideoDetectShot(workspaceId, request) {
+        let runtime = new $dara.RuntimeOptions({});
+        let headers = {};
+        return await this.runVideoDetectShotWithOptions(workspaceId, request, headers, runtime);
+    }
     /**
      * Submit an enterprise VOC async task
      *
```
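`runVideoDetectShotWithSSE` compiles down to an async generator (the `__asyncGenerator`/`__asyncValues` helpers above), so each server-sent event surfaces as one `RunVideoDetectShotResponse` whose body merges the parsed event payload with `RequestId` and `Message`. A hedged consumption sketch follows; the `@darabonba/typescript` import mirrors what this generated client itself references, and the rest is an assumption rather than package documentation.

```ts
import * as $dara from '@darabonba/typescript';
import Client from '@alicloud/quanmiaolightapp20240801';
import * as $_model from '@alicloud/quanmiaolightapp20240801/dist/models/model';

// Streaming variant: iterate the AsyncGenerator returned by runVideoDetectShotWithSSE.
async function streamDetectShot(client: Client, workspaceId: string,
    request: $_model.RunVideoDetectShotRequest): Promise<void> {
  const headers: { [key: string]: string } = {};
  const runtime = new $dara.RuntimeOptions({});
  for await (const partial of client.runVideoDetectShotWithSSE(workspaceId, request, headers, runtime)) {
    // Each partial response carries the event payload plus RequestId/Message in its body.
    console.log(partial.statusCode, partial.body);
  }
}
```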
```diff
@@ -3067,6 +3338,9 @@ class Client extends openapi_core_1.default {
         if (!$dara.isNull(request.splitInterval)) {
             body["splitInterval"] = request.splitInterval;
         }
+        if (!$dara.isNull(request.splitType)) {
+            body["splitType"] = request.splitType;
+        }
         if (!$dara.isNull(request.textProcessTasksShrink)) {
             body["textProcessTasks"] = request.textProcessTasksShrink;
         }
```
```diff
@@ -3119,6 +3393,98 @@ class Client extends openapi_core_1.default {
         let headers = {};
         return await this.submitVideoAnalysisTaskWithOptions(workspaceId, request, headers, runtime);
     }
+    /**
+     * Light app - Submit a video splitting task
+     *
+     * @param tmpReq - SubmitVideoDetectShotTaskRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns SubmitVideoDetectShotTaskResponse
+     */
+    async submitVideoDetectShotTaskWithOptions(workspaceId, tmpReq, headers, runtime) {
+        tmpReq.validate();
+        let request = new $_model.SubmitVideoDetectShotTaskShrinkRequest({});
+        openapi_core_2.OpenApiUtil.convert(tmpReq, request);
+        if (!$dara.isNull(tmpReq.options)) {
+            request.optionsShrink = openapi_core_2.OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.options, "options", "json");
+        }
+        if (!$dara.isNull(tmpReq.recognitionOptions)) {
+            request.recognitionOptionsShrink = openapi_core_2.OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.recognitionOptions, "recognitionOptions", "json");
+        }
+        let body = {};
+        if (!$dara.isNull(request.deduplicationId)) {
+            body["deduplicationId"] = request.deduplicationId;
+        }
+        if (!$dara.isNull(request.intelliSimpPrompt)) {
+            body["intelliSimpPrompt"] = request.intelliSimpPrompt;
+        }
+        if (!$dara.isNull(request.intelliSimpPromptTemplateId)) {
+            body["intelliSimpPromptTemplateId"] = request.intelliSimpPromptTemplateId;
+        }
+        if (!$dara.isNull(request.language)) {
+            body["language"] = request.language;
+        }
+        if (!$dara.isNull(request.modelCustomPromptTemplateId)) {
+            body["modelCustomPromptTemplateId"] = request.modelCustomPromptTemplateId;
+        }
+        if (!$dara.isNull(request.modelId)) {
+            body["modelId"] = request.modelId;
+        }
+        if (!$dara.isNull(request.modelVlCustomPromptTemplateId)) {
+            body["modelVlCustomPromptTemplateId"] = request.modelVlCustomPromptTemplateId;
+        }
+        if (!$dara.isNull(request.optionsShrink)) {
+            body["options"] = request.optionsShrink;
+        }
+        if (!$dara.isNull(request.originalSessionId)) {
+            body["originalSessionId"] = request.originalSessionId;
+        }
+        if (!$dara.isNull(request.preModelId)) {
+            body["preModelId"] = request.preModelId;
+        }
+        if (!$dara.isNull(request.prompt)) {
+            body["prompt"] = request.prompt;
+        }
+        if (!$dara.isNull(request.recognitionOptionsShrink)) {
+            body["recognitionOptions"] = request.recognitionOptionsShrink;
+        }
+        if (!$dara.isNull(request.taskId)) {
+            body["taskId"] = request.taskId;
+        }
+        if (!$dara.isNull(request.videoUrl)) {
+            body["videoUrl"] = request.videoUrl;
+        }
+        if (!$dara.isNull(request.vlPrompt)) {
+            body["vlPrompt"] = request.vlPrompt;
+        }
+        let req = new openapi_core_2.$OpenApiUtil.OpenApiRequest({
+            headers: headers,
+            body: openapi_core_2.OpenApiUtil.parseToMap(body),
+        });
+        let params = new openapi_core_2.$OpenApiUtil.Params({
+            action: "SubmitVideoDetectShotTask",
+            version: "2024-08-01",
+            protocol: "HTTPS",
+            pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/submitVideoDetectShotTask`,
+            method: "POST",
+            authType: "AK",
+            style: "ROA",
+            reqBodyType: "formData",
+            bodyType: "json",
+        });
+        return $dara.cast(await this.callApi(params, req, runtime), new $_model.SubmitVideoDetectShotTaskResponse({}));
+    }
+    /**
+     * Light app - Submit a video splitting task
+     *
+     * @param request - SubmitVideoDetectShotTaskRequest
+     * @returns SubmitVideoDetectShotTaskResponse
+     */
+    async submitVideoDetectShotTask(workspaceId, request) {
+        let runtime = new $dara.RuntimeOptions({});
+        let headers = {};
+        return await this.submitVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime);
+    }
     /**
      * Video analysis - Update configuration
      *
```
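`SubmitVideoDetectShotTask` pairs with the `GetVideoDetectShotTask` wrapper from the first hunk: submit the long-running job, then poll by `taskId`. A sketch under the same assumptions as the earlier examples; the field that carries the task id on the submit response is defined in `SubmitVideoDetectShotTaskResponseBody.ts`, which is not reproduced in this diff, so it stays elided below.

```ts
import Client from '@alicloud/quanmiaolightapp20240801';
import * as $_model from '@alicloud/quanmiaolightapp20240801/dist/models/model';

// Submit-then-poll sketch; request field names match the body map built above.
async function submitAndPoll(client: Client, workspaceId: string): Promise<void> {
  const submitResp = await client.submitVideoDetectShotTask(workspaceId,
    new $_model.SubmitVideoDetectShotTaskRequest({
      videoUrl: 'https://example.com/sample.mp4',
    }));
  // Read the task id from submitResp.body (shape not shown in this diff).
  const taskId = '...';

  const taskResp = await client.getVideoDetectShotTask(workspaceId,
    new $_model.GetVideoDetectShotTaskRequest({ taskId }));
  console.log(taskResp.body);
}
```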
```diff
@@ -3256,6 +3622,93 @@ class Client extends openapi_core_1.default {
         let headers = {};
         return await this.updateVideoAnalysisTasksWithOptions(workspaceId, request, headers, runtime);
     }
+    /**
+     * Smart video splitting - Update configuration
+     *
+     * @param request - UpdateVideoDetectShotConfigRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns UpdateVideoDetectShotConfigResponse
+     */
+    async updateVideoDetectShotConfigWithOptions(workspaceId, request, headers, runtime) {
+        request.validate();
+        let body = {};
+        if (!$dara.isNull(request.asyncConcurrency)) {
+            body["asyncConcurrency"] = request.asyncConcurrency;
+        }
+        let req = new openapi_core_2.$OpenApiUtil.OpenApiRequest({
+            headers: headers,
+            body: openapi_core_2.OpenApiUtil.parseToMap(body),
+        });
+        let params = new openapi_core_2.$OpenApiUtil.Params({
+            action: "UpdateVideoDetectShotConfig",
+            version: "2024-08-01",
+            protocol: "HTTPS",
+            pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/videoAnalysis/updateVideoDetectShotConfig`,
+            method: "PUT",
+            authType: "AK",
+            style: "ROA",
+            reqBodyType: "formData",
+            bodyType: "json",
+        });
+        return $dara.cast(await this.callApi(params, req, runtime), new $_model.UpdateVideoDetectShotConfigResponse({}));
+    }
+    /**
+     * Smart video splitting - Update configuration
+     *
+     * @param request - UpdateVideoDetectShotConfigRequest
+     * @returns UpdateVideoDetectShotConfigResponse
+     */
+    async updateVideoDetectShotConfig(workspaceId, request) {
+        let runtime = new $dara.RuntimeOptions({});
+        let headers = {};
+        return await this.updateVideoDetectShotConfigWithOptions(workspaceId, request, headers, runtime);
+    }
+    /**
+     * Video splitting - Update task status
+     *
+     * @param request - UpdateVideoDetectShotTaskRequest
+     * @param headers - map
+     * @param runtime - runtime options for this request RuntimeOptions
+     * @returns UpdateVideoDetectShotTaskResponse
+     */
+    async updateVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime) {
+        request.validate();
+        let body = {};
+        if (!$dara.isNull(request.taskId)) {
+            body["taskId"] = request.taskId;
+        }
+        if (!$dara.isNull(request.taskStatus)) {
+            body["taskStatus"] = request.taskStatus;
+        }
+        let req = new openapi_core_2.$OpenApiUtil.OpenApiRequest({
+            headers: headers,
+            body: openapi_core_2.OpenApiUtil.parseToMap(body),
+        });
+        let params = new openapi_core_2.$OpenApiUtil.Params({
+            action: "UpdateVideoDetectShotTask",
+            version: "2024-08-01",
+            protocol: "HTTPS",
+            pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/updateVideoDetectShotTask`,
+            method: "PUT",
+            authType: "AK",
+            style: "ROA",
+            reqBodyType: "formData",
+            bodyType: "json",
+        });
+        return $dara.cast(await this.callApi(params, req, runtime), new $_model.UpdateVideoDetectShotTaskResponse({}));
+    }
+    /**
+     * Video splitting - Update task status
+     *
+     * @param request - UpdateVideoDetectShotTaskRequest
+     * @returns UpdateVideoDetectShotTaskResponse
+     */
+    async updateVideoDetectShotTask(workspaceId, request) {
+        let runtime = new $dara.RuntimeOptions({});
+        let headers = {};
+        return await this.updateVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime);
+    }
 }
 exports.default = Client;
 //# sourceMappingURL=client.js.map
```
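The last two wrappers cover the feature's workspace-level settings and task lifecycle. A short sketch with the same caveats as the earlier examples; the type of `asyncConcurrency` and the accepted `taskStatus` values are not shown in this diff, so both values below are placeholders.

```ts
import Client from '@alicloud/quanmiaolightapp20240801';
import * as $_model from '@alicloud/quanmiaolightapp20240801/dist/models/model';

async function manageDetectShot(client: Client, workspaceId: string, taskId: string): Promise<void> {
  // asyncConcurrency is the only body field the update-config wrapper sends (numeric value assumed).
  await client.updateVideoDetectShotConfig(workspaceId,
    new $_model.UpdateVideoDetectShotConfigRequest({ asyncConcurrency: 2 }));

  // Change a task's status; valid values are not listed in this diff.
  await client.updateVideoDetectShotTask(workspaceId,
    new $_model.UpdateVideoDetectShotTaskRequest({ taskId, taskStatus: '...' }));
}
```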