@alicloud/quanmiaolightapp20240801 2.11.0 → 2.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client.d.ts +116 -0
- package/dist/client.js +453 -0
- package/dist/client.js.map +1 -1
- package/dist/models/GetVideoDetectShotConfigResponse.d.ts +19 -0
- package/dist/models/GetVideoDetectShotConfigResponse.js +69 -0
- package/dist/models/GetVideoDetectShotConfigResponse.js.map +1 -0
- package/dist/models/GetVideoDetectShotConfigResponseBody.d.ts +62 -0
- package/dist/models/GetVideoDetectShotConfigResponseBody.js +90 -0
- package/dist/models/GetVideoDetectShotConfigResponseBody.js.map +1 -0
- package/dist/models/GetVideoDetectShotTaskRequest.d.ts +21 -0
- package/dist/models/GetVideoDetectShotTaskRequest.js +58 -0
- package/dist/models/GetVideoDetectShotTaskRequest.js.map +1 -0
- package/dist/models/GetVideoDetectShotTaskResponse.d.ts +19 -0
- package/dist/models/GetVideoDetectShotTaskResponse.js +69 -0
- package/dist/models/GetVideoDetectShotTaskResponse.js.map +1 -0
- package/dist/models/GetVideoDetectShotTaskResponseBody.d.ts +258 -0
- package/dist/models/GetVideoDetectShotTaskResponseBody.js +291 -0
- package/dist/models/GetVideoDetectShotTaskResponseBody.js.map +1 -0
- package/dist/models/RunVideoAnalysisRequest.d.ts +5 -0
- package/dist/models/RunVideoAnalysisRequest.js +2 -0
- package/dist/models/RunVideoAnalysisRequest.js.map +1 -1
- package/dist/models/RunVideoAnalysisShrinkRequest.d.ts +5 -0
- package/dist/models/RunVideoAnalysisShrinkRequest.js +2 -0
- package/dist/models/RunVideoAnalysisShrinkRequest.js.map +1 -1
- package/dist/models/RunVideoDetectShotRequest.d.ts +86 -0
- package/dist/models/RunVideoDetectShotRequest.js +90 -0
- package/dist/models/RunVideoDetectShotRequest.js.map +1 -0
- package/dist/models/RunVideoDetectShotResponse.d.ts +19 -0
- package/dist/models/RunVideoDetectShotResponse.js +69 -0
- package/dist/models/RunVideoDetectShotResponse.js.map +1 -0
- package/dist/models/RunVideoDetectShotResponseBody.d.ts +183 -0
- package/dist/models/RunVideoDetectShotResponseBody.js +229 -0
- package/dist/models/RunVideoDetectShotResponseBody.js.map +1 -0
- package/dist/models/RunVideoDetectShotShrinkRequest.d.ts +86 -0
- package/dist/models/RunVideoDetectShotShrinkRequest.js +84 -0
- package/dist/models/RunVideoDetectShotShrinkRequest.js.map +1 -0
- package/dist/models/SubmitVideoAnalysisTaskRequest.d.ts +5 -0
- package/dist/models/SubmitVideoAnalysisTaskRequest.js +2 -0
- package/dist/models/SubmitVideoAnalysisTaskRequest.js.map +1 -1
- package/dist/models/SubmitVideoAnalysisTaskShrinkRequest.d.ts +5 -0
- package/dist/models/SubmitVideoAnalysisTaskShrinkRequest.js +2 -0
- package/dist/models/SubmitVideoAnalysisTaskShrinkRequest.js.map +1 -1
- package/dist/models/SubmitVideoDetectShotTaskRequest.d.ts +83 -0
- package/dist/models/SubmitVideoDetectShotTaskRequest.js +92 -0
- package/dist/models/SubmitVideoDetectShotTaskRequest.js.map +1 -0
- package/dist/models/SubmitVideoDetectShotTaskResponse.d.ts +19 -0
- package/dist/models/SubmitVideoDetectShotTaskResponse.js +69 -0
- package/dist/models/SubmitVideoDetectShotTaskResponse.js.map +1 -0
- package/dist/models/SubmitVideoDetectShotTaskResponseBody.d.ts +59 -0
- package/dist/models/SubmitVideoDetectShotTaskResponseBody.js +90 -0
- package/dist/models/SubmitVideoDetectShotTaskResponseBody.js.map +1 -0
- package/dist/models/SubmitVideoDetectShotTaskShrinkRequest.d.ts +83 -0
- package/dist/models/SubmitVideoDetectShotTaskShrinkRequest.js +86 -0
- package/dist/models/SubmitVideoDetectShotTaskShrinkRequest.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotConfigRequest.d.ts +21 -0
- package/dist/models/UpdateVideoDetectShotConfigRequest.js +58 -0
- package/dist/models/UpdateVideoDetectShotConfigRequest.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotConfigResponse.d.ts +19 -0
- package/dist/models/UpdateVideoDetectShotConfigResponse.js +69 -0
- package/dist/models/UpdateVideoDetectShotConfigResponse.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotConfigResponseBody.d.ts +41 -0
- package/dist/models/UpdateVideoDetectShotConfigResponseBody.js +66 -0
- package/dist/models/UpdateVideoDetectShotConfigResponseBody.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotTaskRequest.d.ts +29 -0
- package/dist/models/UpdateVideoDetectShotTaskRequest.js +60 -0
- package/dist/models/UpdateVideoDetectShotTaskRequest.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotTaskResponse.d.ts +19 -0
- package/dist/models/UpdateVideoDetectShotTaskResponse.js +69 -0
- package/dist/models/UpdateVideoDetectShotTaskResponse.js.map +1 -0
- package/dist/models/UpdateVideoDetectShotTaskResponseBody.d.ts +69 -0
- package/dist/models/UpdateVideoDetectShotTaskResponseBody.js +94 -0
- package/dist/models/UpdateVideoDetectShotTaskResponseBody.js.map +1 -0
- package/dist/models/model.d.ts +36 -0
- package/dist/models/model.js +79 -6
- package/dist/models/model.js.map +1 -1
- package/package.json +1 -1
- package/src/client.ts +512 -0
- package/src/models/GetVideoDetectShotConfigResponse.ts +40 -0
- package/src/models/GetVideoDetectShotConfigResponseBody.ts +98 -0
- package/src/models/GetVideoDetectShotTaskRequest.ts +34 -0
- package/src/models/GetVideoDetectShotTaskResponse.ts +40 -0
- package/src/models/GetVideoDetectShotTaskResponseBody.ts +430 -0
- package/src/models/RunVideoAnalysisRequest.ts +7 -0
- package/src/models/RunVideoAnalysisShrinkRequest.ts +7 -0
- package/src/models/RunVideoDetectShotRequest.ts +131 -0
- package/src/models/RunVideoDetectShotResponse.ts +40 -0
- package/src/models/RunVideoDetectShotResponseBody.ts +311 -0
- package/src/models/RunVideoDetectShotShrinkRequest.ts +125 -0
- package/src/models/SubmitVideoAnalysisTaskRequest.ts +7 -0
- package/src/models/SubmitVideoAnalysisTaskShrinkRequest.ts +7 -0
- package/src/models/SubmitVideoDetectShotTaskRequest.ts +130 -0
- package/src/models/SubmitVideoDetectShotTaskResponse.ts +40 -0
- package/src/models/SubmitVideoDetectShotTaskResponseBody.ts +95 -0
- package/src/models/SubmitVideoDetectShotTaskShrinkRequest.ts +124 -0
- package/src/models/UpdateVideoDetectShotConfigRequest.ts +34 -0
- package/src/models/UpdateVideoDetectShotConfigResponse.ts +40 -0
- package/src/models/UpdateVideoDetectShotConfigResponseBody.ts +62 -0
- package/src/models/UpdateVideoDetectShotTaskRequest.ts +44 -0
- package/src/models/UpdateVideoDetectShotTaskResponse.ts +40 -0
- package/src/models/UpdateVideoDetectShotTaskResponseBody.ts +109 -0
- package/src/models/model.ts +36 -0
package/src/client.ts
CHANGED
@@ -505,6 +505,86 @@ export default class Client extends OpenApi {
     return await this.getVideoAnalysisTaskWithOptions(workspaceId, request, headers, runtime);
   }
 
+  /**
+   * Intelligent shot splitting - get configuration
+   *
+   * @param headers - map
+   * @param runtime - runtime options for this request RuntimeOptions
+   * @returns GetVideoDetectShotConfigResponse
+   */
+  async getVideoDetectShotConfigWithOptions(workspaceId: string, headers: {[key: string ]: string}, runtime: $dara.RuntimeOptions): Promise<$_model.GetVideoDetectShotConfigResponse> {
+    let req = new $OpenApiUtil.OpenApiRequest({
+      headers: headers,
+    });
+    let params = new $OpenApiUtil.Params({
+      action: "GetVideoDetectShotConfig",
+      version: "2024-08-01",
+      protocol: "HTTPS",
+      pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/videoAnalysis/getVideoDetectShotConfig`,
+      method: "GET",
+      authType: "AK",
+      style: "ROA",
+      reqBodyType: "json",
+      bodyType: "json",
+    });
+    return $dara.cast<$_model.GetVideoDetectShotConfigResponse>(await this.callApi(params, req, runtime), new $_model.GetVideoDetectShotConfigResponse({}));
+  }
+
+  /**
+   * Intelligent shot splitting - get configuration
+   * @returns GetVideoDetectShotConfigResponse
+   */
+  async getVideoDetectShotConfig(workspaceId: string): Promise<$_model.GetVideoDetectShotConfigResponse> {
+    let runtime = new $dara.RuntimeOptions({ });
+    let headers : {[key: string ]: string} = { };
+    return await this.getVideoDetectShotConfigWithOptions(workspaceId, headers, runtime);
+  }
+
+  /**
+   * LightApp - get video shot-splitting async task result
+   *
+   * @param request - GetVideoDetectShotTaskRequest
+   * @param headers - map
+   * @param runtime - runtime options for this request RuntimeOptions
+   * @returns GetVideoDetectShotTaskResponse
+   */
+  async getVideoDetectShotTaskWithOptions(workspaceId: string, request: $_model.GetVideoDetectShotTaskRequest, headers: {[key: string ]: string}, runtime: $dara.RuntimeOptions): Promise<$_model.GetVideoDetectShotTaskResponse> {
+    request.validate();
+    let query : {[key: string ]: any} = { };
+    if (!$dara.isNull(request.taskId)) {
+      query["taskId"] = request.taskId;
+    }
+
+    let req = new $OpenApiUtil.OpenApiRequest({
+      headers: headers,
+      query: OpenApiUtil.query(query),
+    });
+    let params = new $OpenApiUtil.Params({
+      action: "GetVideoDetectShotTask",
+      version: "2024-08-01",
+      protocol: "HTTPS",
+      pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/getVideoDetectShotTask`,
+      method: "GET",
+      authType: "AK",
+      style: "ROA",
+      reqBodyType: "json",
+      bodyType: "json",
+    });
+    return $dara.cast<$_model.GetVideoDetectShotTaskResponse>(await this.callApi(params, req, runtime), new $_model.GetVideoDetectShotTaskResponse({}));
+  }
+
+  /**
+   * LightApp - get video shot-splitting async task result
+   *
+   * @param request - GetVideoDetectShotTaskRequest
+   * @returns GetVideoDetectShotTaskResponse
+   */
+  async getVideoDetectShotTask(workspaceId: string, request: $_model.GetVideoDetectShotTaskRequest): Promise<$_model.GetVideoDetectShotTaskResponse> {
+    let runtime = new $dara.RuntimeOptions({ });
+    let headers : {[key: string ]: string} = { };
+    return await this.getVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime);
+  }
+
   /**
    * Trending news recommendation
    *
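The two getters above follow the SDK's usual generated pattern: a `*WithOptions` variant that builds the ROA call, and a thin wrapper that fills in default headers and runtime options. A minimal usage sketch follows; the `Config` class is assumed to come from `@alicloud/openapi-core`, and the endpoint, workspace id, task id, and deep model import path are placeholders, not values confirmed by this diff.

```ts
import Client from '@alicloud/quanmiaolightapp20240801';
import * as $OpenApiUtil from '@alicloud/openapi-core';
// Deep import mirrors the file list above; adjust if the package re-exports models elsewhere.
import { GetVideoDetectShotTaskRequest } from '@alicloud/quanmiaolightapp20240801/dist/models/GetVideoDetectShotTaskRequest';

async function main(): Promise<void> {
  // Credentials come from the environment; the endpoint is a placeholder.
  const client = new Client(new $OpenApiUtil.Config({
    accessKeyId: process.env.ALIBABA_CLOUD_ACCESS_KEY_ID,
    accessKeySecret: process.env.ALIBABA_CLOUD_ACCESS_KEY_SECRET,
    endpoint: 'quanmiaolightapp.cn-beijing.aliyuncs.com',
  }));

  // GET .../videoAnalysis/getVideoDetectShotConfig
  const config = await client.getVideoDetectShotConfig('yourWorkspaceId');
  console.log(config.body?.data?.asyncConcurrency);

  // GET .../getVideoDetectShotTask?taskId=...
  const task = await client.getVideoDetectShotTask(
    'yourWorkspaceId',
    new GetVideoDetectShotTaskRequest({ taskId: 'yourTaskId' }),
  );
  console.log(task.body);
}

main().catch(console.error);
```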
@@ -2699,6 +2779,10 @@ export default class Client extends OpenApi {
       body["splitInterval"] = request.splitInterval;
     }
 
+    if (!$dara.isNull(request.splitType)) {
+      body["splitType"] = request.splitType;
+    }
+
     if (!$dara.isNull(request.taskId)) {
       body["taskId"] = request.taskId;
     }
@@ -2851,6 +2935,10 @@ export default class Client extends OpenApi {
       body["splitInterval"] = request.splitInterval;
     }
 
+    if (!$dara.isNull(request.splitType)) {
+      body["splitType"] = request.splitType;
+    }
+
     if (!$dara.isNull(request.taskId)) {
       body["taskId"] = request.taskId;
     }
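The two small hunks above (and the matching one in `submitVideoAnalysisTask` further down) only add serialization of the new optional `splitType` field that 2.12.0 introduces on `RunVideoAnalysisRequest` and `SubmitVideoAnalysisTaskRequest` (see the small model diffs in the file list). A hedged sketch; the value is a placeholder, since the accepted `splitType` values are not part of this diff.

```ts
import { RunVideoAnalysisRequest } from '@alicloud/quanmiaolightapp20240801/dist/models/RunVideoAnalysisRequest';

const request = new RunVideoAnalysisRequest({
  splitInterval: 10,      // existing field, serialized as before
  splitType: 'duration',  // new optional field in 2.12.0; only sent when non-null
});
```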
@@ -2917,6 +3005,221 @@ export default class Client extends OpenApi {
     return await this.runVideoAnalysisWithOptions(workspaceId, request, headers, runtime);
   }
 
+  /**
+   * LightApp - video shot splitting
+   *
+   * @param tmpReq - RunVideoDetectShotRequest
+   * @param headers - map
+   * @param runtime - runtime options for this request RuntimeOptions
+   * @returns RunVideoDetectShotResponse
+   */
+  async *runVideoDetectShotWithSSE(workspaceId: string, tmpReq: $_model.RunVideoDetectShotRequest, headers: {[key: string ]: string}, runtime: $dara.RuntimeOptions): AsyncGenerator<$_model.RunVideoDetectShotResponse, any, unknown> {
+    tmpReq.validate();
+    let request = new $_model.RunVideoDetectShotShrinkRequest({ });
+    OpenApiUtil.convert(tmpReq, request);
+    if (!$dara.isNull(tmpReq.options)) {
+      request.optionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.options, "options", "json");
+    }
+
+    if (!$dara.isNull(tmpReq.recognitionOptions)) {
+      request.recognitionOptionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.recognitionOptions, "recognitionOptions", "json");
+    }
+
+    let body : {[key: string ]: any} = { };
+    if (!$dara.isNull(request.intelliSimpPrompt)) {
+      body["intelliSimpPrompt"] = request.intelliSimpPrompt;
+    }
+
+    if (!$dara.isNull(request.intelliSimpPromptTemplateId)) {
+      body["intelliSimpPromptTemplateId"] = request.intelliSimpPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.language)) {
+      body["language"] = request.language;
+    }
+
+    if (!$dara.isNull(request.modelCustomPromptTemplateId)) {
+      body["modelCustomPromptTemplateId"] = request.modelCustomPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.modelId)) {
+      body["modelId"] = request.modelId;
+    }
+
+    if (!$dara.isNull(request.modelVlCustomPromptTemplateId)) {
+      body["modelVlCustomPromptTemplateId"] = request.modelVlCustomPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.optionsShrink)) {
+      body["options"] = request.optionsShrink;
+    }
+
+    if (!$dara.isNull(request.originalSessionId)) {
+      body["originalSessionId"] = request.originalSessionId;
+    }
+
+    if (!$dara.isNull(request.preModelId)) {
+      body["preModelId"] = request.preModelId;
+    }
+
+    if (!$dara.isNull(request.prompt)) {
+      body["prompt"] = request.prompt;
+    }
+
+    if (!$dara.isNull(request.recognitionOptionsShrink)) {
+      body["recognitionOptions"] = request.recognitionOptionsShrink;
+    }
+
+    if (!$dara.isNull(request.taskId)) {
+      body["taskId"] = request.taskId;
+    }
+
+    if (!$dara.isNull(request.videoUrl)) {
+      body["videoUrl"] = request.videoUrl;
+    }
+
+    if (!$dara.isNull(request.vlPrompt)) {
+      body["vlPrompt"] = request.vlPrompt;
+    }
+
+    let req = new $OpenApiUtil.OpenApiRequest({
+      headers: headers,
+      body: OpenApiUtil.parseToMap(body),
+    });
+    let params = new $OpenApiUtil.Params({
+      action: "RunVideoDetectShot",
+      version: "2024-08-01",
+      protocol: "HTTPS",
+      pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/runVideoDetectShot`,
+      method: "POST",
+      authType: "AK",
+      style: "ROA",
+      reqBodyType: "formData",
+      bodyType: "json",
+    });
+    let sseResp = await this.callSSEApi(params, req, runtime);
+
+    for await (let resp of sseResp) {
+      let data = JSON.parse(resp.event.data);
+      yield $dara.cast<$_model.RunVideoDetectShotResponse>({
+        statusCode: resp.statusCode,
+        headers: resp.headers,
+        body: {
+          ...data,
+          RequestId: resp.event.id,
+          Message: resp.event.event,
+        },
+      }, new $_model.RunVideoDetectShotResponse({}));
+    }
+  }
+
+  /**
+   * LightApp - video shot splitting
+   *
+   * @param tmpReq - RunVideoDetectShotRequest
+   * @param headers - map
+   * @param runtime - runtime options for this request RuntimeOptions
+   * @returns RunVideoDetectShotResponse
+   */
+  async runVideoDetectShotWithOptions(workspaceId: string, tmpReq: $_model.RunVideoDetectShotRequest, headers: {[key: string ]: string}, runtime: $dara.RuntimeOptions): Promise<$_model.RunVideoDetectShotResponse> {
+    tmpReq.validate();
+    let request = new $_model.RunVideoDetectShotShrinkRequest({ });
+    OpenApiUtil.convert(tmpReq, request);
+    if (!$dara.isNull(tmpReq.options)) {
+      request.optionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.options, "options", "json");
+    }
+
+    if (!$dara.isNull(tmpReq.recognitionOptions)) {
+      request.recognitionOptionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.recognitionOptions, "recognitionOptions", "json");
+    }
+
+    let body : {[key: string ]: any} = { };
+    if (!$dara.isNull(request.intelliSimpPrompt)) {
+      body["intelliSimpPrompt"] = request.intelliSimpPrompt;
+    }
+
+    if (!$dara.isNull(request.intelliSimpPromptTemplateId)) {
+      body["intelliSimpPromptTemplateId"] = request.intelliSimpPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.language)) {
+      body["language"] = request.language;
+    }
+
+    if (!$dara.isNull(request.modelCustomPromptTemplateId)) {
+      body["modelCustomPromptTemplateId"] = request.modelCustomPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.modelId)) {
+      body["modelId"] = request.modelId;
+    }
+
+    if (!$dara.isNull(request.modelVlCustomPromptTemplateId)) {
+      body["modelVlCustomPromptTemplateId"] = request.modelVlCustomPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.optionsShrink)) {
+      body["options"] = request.optionsShrink;
+    }
+
+    if (!$dara.isNull(request.originalSessionId)) {
+      body["originalSessionId"] = request.originalSessionId;
+    }
+
+    if (!$dara.isNull(request.preModelId)) {
+      body["preModelId"] = request.preModelId;
+    }
+
+    if (!$dara.isNull(request.prompt)) {
+      body["prompt"] = request.prompt;
+    }
+
+    if (!$dara.isNull(request.recognitionOptionsShrink)) {
+      body["recognitionOptions"] = request.recognitionOptionsShrink;
+    }
+
+    if (!$dara.isNull(request.taskId)) {
+      body["taskId"] = request.taskId;
+    }
+
+    if (!$dara.isNull(request.videoUrl)) {
+      body["videoUrl"] = request.videoUrl;
+    }
+
+    if (!$dara.isNull(request.vlPrompt)) {
+      body["vlPrompt"] = request.vlPrompt;
+    }
+
+    let req = new $OpenApiUtil.OpenApiRequest({
+      headers: headers,
+      body: OpenApiUtil.parseToMap(body),
+    });
+    let params = new $OpenApiUtil.Params({
+      action: "RunVideoDetectShot",
+      version: "2024-08-01",
+      protocol: "HTTPS",
+      pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/runVideoDetectShot`,
+      method: "POST",
+      authType: "AK",
+      style: "ROA",
+      reqBodyType: "formData",
+      bodyType: "json",
+    });
+    return $dara.cast<$_model.RunVideoDetectShotResponse>(await this.callApi(params, req, runtime), new $_model.RunVideoDetectShotResponse({}));
+  }
+
+  /**
+   * LightApp - video shot splitting
+   *
+   * @param request - RunVideoDetectShotRequest
+   * @returns RunVideoDetectShotResponse
+   */
+  async runVideoDetectShot(workspaceId: string, request: $_model.RunVideoDetectShotRequest): Promise<$_model.RunVideoDetectShotResponse> {
+    let runtime = new $dara.RuntimeOptions({ });
+    let headers : {[key: string ]: string} = { };
+    return await this.runVideoDetectShotWithOptions(workspaceId, request, headers, runtime);
+  }
+
   /**
    * Submit enterprise VOC async task
    *
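`runVideoDetectShotWithSSE` is an async generator: each server-sent event is JSON-parsed and yielded as a `RunVideoDetectShotResponse`, with the event id and event name folded into the body as `RequestId` and `Message`. A consumption sketch, assuming a `Client` constructed as in the earlier example; the video URL and import paths are placeholders.

```ts
import * as $dara from '@darabonba/typescript';
import Client from '@alicloud/quanmiaolightapp20240801';
import { RunVideoDetectShotRequest } from '@alicloud/quanmiaolightapp20240801/dist/models/RunVideoDetectShotRequest';

async function streamShots(client: Client, workspaceId: string): Promise<void> {
  const request = new RunVideoDetectShotRequest({
    videoUrl: 'https://example.com/input.mp4',  // placeholder
  });

  // Each iteration corresponds to one SSE event emitted by RunVideoDetectShot.
  const stream = client.runVideoDetectShotWithSSE(workspaceId, request, {}, new $dara.RuntimeOptions({}));
  for await (const resp of stream) {
    console.log(resp.statusCode, resp.body);
  }
}
```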
@@ -3263,6 +3566,10 @@ export default class Client extends OpenApi {
       body["splitInterval"] = request.splitInterval;
     }
 
+    if (!$dara.isNull(request.splitType)) {
+      body["splitType"] = request.splitType;
+    }
+
     if (!$dara.isNull(request.textProcessTasksShrink)) {
       body["textProcessTasks"] = request.textProcessTasksShrink;
     }
@@ -3325,6 +3632,117 @@ export default class Client extends OpenApi {
     return await this.submitVideoAnalysisTaskWithOptions(workspaceId, request, headers, runtime);
   }
 
+  /**
+   * LightApp - submit video shot-splitting task
+   *
+   * @param tmpReq - SubmitVideoDetectShotTaskRequest
+   * @param headers - map
+   * @param runtime - runtime options for this request RuntimeOptions
+   * @returns SubmitVideoDetectShotTaskResponse
+   */
+  async submitVideoDetectShotTaskWithOptions(workspaceId: string, tmpReq: $_model.SubmitVideoDetectShotTaskRequest, headers: {[key: string ]: string}, runtime: $dara.RuntimeOptions): Promise<$_model.SubmitVideoDetectShotTaskResponse> {
+    tmpReq.validate();
+    let request = new $_model.SubmitVideoDetectShotTaskShrinkRequest({ });
+    OpenApiUtil.convert(tmpReq, request);
+    if (!$dara.isNull(tmpReq.options)) {
+      request.optionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.options, "options", "json");
+    }
+
+    if (!$dara.isNull(tmpReq.recognitionOptions)) {
+      request.recognitionOptionsShrink = OpenApiUtil.arrayToStringWithSpecifiedStyle(tmpReq.recognitionOptions, "recognitionOptions", "json");
+    }
+
+    let body : {[key: string ]: any} = { };
+    if (!$dara.isNull(request.deduplicationId)) {
+      body["deduplicationId"] = request.deduplicationId;
+    }
+
+    if (!$dara.isNull(request.intelliSimpPrompt)) {
+      body["intelliSimpPrompt"] = request.intelliSimpPrompt;
+    }
+
+    if (!$dara.isNull(request.intelliSimpPromptTemplateId)) {
+      body["intelliSimpPromptTemplateId"] = request.intelliSimpPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.language)) {
+      body["language"] = request.language;
+    }
+
+    if (!$dara.isNull(request.modelCustomPromptTemplateId)) {
+      body["modelCustomPromptTemplateId"] = request.modelCustomPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.modelId)) {
+      body["modelId"] = request.modelId;
+    }
+
+    if (!$dara.isNull(request.modelVlCustomPromptTemplateId)) {
+      body["modelVlCustomPromptTemplateId"] = request.modelVlCustomPromptTemplateId;
+    }
+
+    if (!$dara.isNull(request.optionsShrink)) {
+      body["options"] = request.optionsShrink;
+    }
+
+    if (!$dara.isNull(request.originalSessionId)) {
+      body["originalSessionId"] = request.originalSessionId;
+    }
+
+    if (!$dara.isNull(request.preModelId)) {
+      body["preModelId"] = request.preModelId;
+    }
+
+    if (!$dara.isNull(request.prompt)) {
+      body["prompt"] = request.prompt;
+    }
+
+    if (!$dara.isNull(request.recognitionOptionsShrink)) {
+      body["recognitionOptions"] = request.recognitionOptionsShrink;
+    }
+
+    if (!$dara.isNull(request.taskId)) {
+      body["taskId"] = request.taskId;
+    }
+
+    if (!$dara.isNull(request.videoUrl)) {
+      body["videoUrl"] = request.videoUrl;
+    }
+
+    if (!$dara.isNull(request.vlPrompt)) {
+      body["vlPrompt"] = request.vlPrompt;
+    }
+
+    let req = new $OpenApiUtil.OpenApiRequest({
+      headers: headers,
+      body: OpenApiUtil.parseToMap(body),
+    });
+    let params = new $OpenApiUtil.Params({
+      action: "SubmitVideoDetectShotTask",
+      version: "2024-08-01",
+      protocol: "HTTPS",
+      pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/submitVideoDetectShotTask`,
+      method: "POST",
+      authType: "AK",
+      style: "ROA",
+      reqBodyType: "formData",
+      bodyType: "json",
+    });
+    return $dara.cast<$_model.SubmitVideoDetectShotTaskResponse>(await this.callApi(params, req, runtime), new $_model.SubmitVideoDetectShotTaskResponse({}));
+  }
+
+  /**
+   * LightApp - submit video shot-splitting task
+   *
+   * @param request - SubmitVideoDetectShotTaskRequest
+   * @returns SubmitVideoDetectShotTaskResponse
+   */
+  async submitVideoDetectShotTask(workspaceId: string, request: $_model.SubmitVideoDetectShotTaskRequest): Promise<$_model.SubmitVideoDetectShotTaskResponse> {
+    let runtime = new $dara.RuntimeOptions({ });
+    let headers : {[key: string ]: string} = { };
+    return await this.submitVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime);
+  }
+
   /**
    * Video understanding - update configuration
    *
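`submitVideoDetectShotTask` is the asynchronous counterpart of `runVideoDetectShot`: it posts the same form-data body (plus an optional `deduplicationId`) and returns immediately; the result is later fetched with `getVideoDetectShotTask` from the first hunk. A sketch of that submit-then-poll flow; the response-body field holding the task id is defined in `SubmitVideoDetectShotTaskResponseBody.ts`, which is not shown in this excerpt, so the values below are illustrative only.

```ts
import Client from '@alicloud/quanmiaolightapp20240801';
import { SubmitVideoDetectShotTaskRequest } from '@alicloud/quanmiaolightapp20240801/dist/models/SubmitVideoDetectShotTaskRequest';
import { GetVideoDetectShotTaskRequest } from '@alicloud/quanmiaolightapp20240801/dist/models/GetVideoDetectShotTaskRequest';

async function detectShotsAsync(client: Client, workspaceId: string): Promise<void> {
  // POST .../submitVideoDetectShotTask
  const submitted = await client.submitVideoDetectShotTask(
    workspaceId,
    new SubmitVideoDetectShotTaskRequest({
      videoUrl: 'https://example.com/input.mp4',  // placeholder
      deduplicationId: 'my-idempotency-key',      // optional, specific to the async variant
    }),
  );
  console.log(submitted.body);  // carries the task id to poll with (exact shape not shown here)

  // Later: GET .../getVideoDetectShotTask?taskId=...
  const result = await client.getVideoDetectShotTask(
    workspaceId,
    new GetVideoDetectShotTaskRequest({ taskId: 'taskId-from-the-submit-response' }),
  );
  console.log(result.body);
}
```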
@@ -3474,4 +3892,98 @@ export default class Client extends OpenApi {
     return await this.updateVideoAnalysisTasksWithOptions(workspaceId, request, headers, runtime);
   }
 
+  /**
+   * Intelligent shot splitting - update configuration
+   *
+   * @param request - UpdateVideoDetectShotConfigRequest
+   * @param headers - map
+   * @param runtime - runtime options for this request RuntimeOptions
+   * @returns UpdateVideoDetectShotConfigResponse
+   */
+  async updateVideoDetectShotConfigWithOptions(workspaceId: string, request: $_model.UpdateVideoDetectShotConfigRequest, headers: {[key: string ]: string}, runtime: $dara.RuntimeOptions): Promise<$_model.UpdateVideoDetectShotConfigResponse> {
+    request.validate();
+    let body : {[key: string ]: any} = { };
+    if (!$dara.isNull(request.asyncConcurrency)) {
+      body["asyncConcurrency"] = request.asyncConcurrency;
+    }
+
+    let req = new $OpenApiUtil.OpenApiRequest({
+      headers: headers,
+      body: OpenApiUtil.parseToMap(body),
+    });
+    let params = new $OpenApiUtil.Params({
+      action: "UpdateVideoDetectShotConfig",
+      version: "2024-08-01",
+      protocol: "HTTPS",
+      pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/videoAnalysis/updateVideoDetectShotConfig`,
+      method: "PUT",
+      authType: "AK",
+      style: "ROA",
+      reqBodyType: "formData",
+      bodyType: "json",
+    });
+    return $dara.cast<$_model.UpdateVideoDetectShotConfigResponse>(await this.callApi(params, req, runtime), new $_model.UpdateVideoDetectShotConfigResponse({}));
+  }
+
+  /**
+   * Intelligent shot splitting - update configuration
+   *
+   * @param request - UpdateVideoDetectShotConfigRequest
+   * @returns UpdateVideoDetectShotConfigResponse
+   */
+  async updateVideoDetectShotConfig(workspaceId: string, request: $_model.UpdateVideoDetectShotConfigRequest): Promise<$_model.UpdateVideoDetectShotConfigResponse> {
+    let runtime = new $dara.RuntimeOptions({ });
+    let headers : {[key: string ]: string} = { };
+    return await this.updateVideoDetectShotConfigWithOptions(workspaceId, request, headers, runtime);
+  }
+
+  /**
+   * Video shot splitting - update task status
+   *
+   * @param request - UpdateVideoDetectShotTaskRequest
+   * @param headers - map
+   * @param runtime - runtime options for this request RuntimeOptions
+   * @returns UpdateVideoDetectShotTaskResponse
+   */
+  async updateVideoDetectShotTaskWithOptions(workspaceId: string, request: $_model.UpdateVideoDetectShotTaskRequest, headers: {[key: string ]: string}, runtime: $dara.RuntimeOptions): Promise<$_model.UpdateVideoDetectShotTaskResponse> {
+    request.validate();
+    let body : {[key: string ]: any} = { };
+    if (!$dara.isNull(request.taskId)) {
+      body["taskId"] = request.taskId;
+    }
+
+    if (!$dara.isNull(request.taskStatus)) {
+      body["taskStatus"] = request.taskStatus;
+    }
+
+    let req = new $OpenApiUtil.OpenApiRequest({
+      headers: headers,
+      body: OpenApiUtil.parseToMap(body),
+    });
+    let params = new $OpenApiUtil.Params({
+      action: "UpdateVideoDetectShotTask",
+      version: "2024-08-01",
+      protocol: "HTTPS",
+      pathname: `/${$dara.URL.percentEncode(workspaceId)}/quanmiao/lightapp/updateVideoDetectShotTask`,
+      method: "PUT",
+      authType: "AK",
+      style: "ROA",
+      reqBodyType: "formData",
+      bodyType: "json",
+    });
+    return $dara.cast<$_model.UpdateVideoDetectShotTaskResponse>(await this.callApi(params, req, runtime), new $_model.UpdateVideoDetectShotTaskResponse({}));
+  }
+
+  /**
+   * Video shot splitting - update task status
+   *
+   * @param request - UpdateVideoDetectShotTaskRequest
+   * @returns UpdateVideoDetectShotTaskResponse
+   */
+  async updateVideoDetectShotTask(workspaceId: string, request: $_model.UpdateVideoDetectShotTaskRequest): Promise<$_model.UpdateVideoDetectShotTaskResponse> {
+    let runtime = new $dara.RuntimeOptions({ });
+    let headers : {[key: string ]: string} = { };
+    return await this.updateVideoDetectShotTaskWithOptions(workspaceId, request, headers, runtime);
+  }
+
 }
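The last two additions are PUT endpoints: `updateVideoDetectShotConfig` writes the workspace-level `asyncConcurrency` setting returned by `getVideoDetectShotConfig`, and `updateVideoDetectShotTask` changes a task's status. A short sketch; the `taskStatus` value is a placeholder, since the allowed values are not listed in this diff.

```ts
import Client from '@alicloud/quanmiaolightapp20240801';
import { UpdateVideoDetectShotConfigRequest } from '@alicloud/quanmiaolightapp20240801/dist/models/UpdateVideoDetectShotConfigRequest';
import { UpdateVideoDetectShotTaskRequest } from '@alicloud/quanmiaolightapp20240801/dist/models/UpdateVideoDetectShotTaskRequest';

async function tuneDetectShot(client: Client, workspaceId: string): Promise<void> {
  // PUT .../videoAnalysis/updateVideoDetectShotConfig
  await client.updateVideoDetectShotConfig(
    workspaceId,
    new UpdateVideoDetectShotConfigRequest({ asyncConcurrency: 2 }),
  );

  // PUT .../updateVideoDetectShotTask ('STOPPED' is a placeholder status value)
  await client.updateVideoDetectShotTask(
    workspaceId,
    new UpdateVideoDetectShotTaskRequest({ taskId: 'yourTaskId', taskStatus: 'STOPPED' }),
  );
}
```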
package/src/models/GetVideoDetectShotConfigResponse.ts
ADDED
@@ -0,0 +1,40 @@
+// This file is auto-generated, don't edit it
+import * as $dara from '@darabonba/typescript';
+import { GetVideoDetectShotConfigResponseBody } from "./GetVideoDetectShotConfigResponseBody";
+
+
+export class GetVideoDetectShotConfigResponse extends $dara.Model {
+  headers?: { [key: string]: string };
+  statusCode?: number;
+  body?: GetVideoDetectShotConfigResponseBody;
+  static names(): { [key: string]: string } {
+    return {
+      headers: 'headers',
+      statusCode: 'statusCode',
+      body: 'body',
+    };
+  }
+
+  static types(): { [key: string]: any } {
+    return {
+      headers: { 'type': 'map', 'keyType': 'string', 'valueType': 'string' },
+      statusCode: 'number',
+      body: GetVideoDetectShotConfigResponseBody,
+    };
+  }
+
+  validate() {
+    if(this.headers) {
+      $dara.Model.validateMap(this.headers);
+    }
+    if(this.body && typeof (this.body as any).validate === 'function') {
+      (this.body as any).validate();
+    }
+    super.validate();
+  }
+
+  constructor(map?: { [key: string]: any }) {
+    super(map);
+  }
+}
+
package/src/models/GetVideoDetectShotConfigResponseBody.ts
ADDED
@@ -0,0 +1,98 @@
+// This file is auto-generated, don't edit it
+import * as $dara from '@darabonba/typescript';
+
+
+export class GetVideoDetectShotConfigResponseBodyData extends $dara.Model {
+  /**
+   * @example
+   * 2
+   */
+  asyncConcurrency?: number;
+  static names(): { [key: string]: string } {
+    return {
+      asyncConcurrency: 'asyncConcurrency',
+    };
+  }
+
+  static types(): { [key: string]: any } {
+    return {
+      asyncConcurrency: 'number',
+    };
+  }
+
+  validate() {
+    super.validate();
+  }
+
+  constructor(map?: { [key: string]: any }) {
+    super(map);
+  }
+}
+
+export class GetVideoDetectShotConfigResponseBody extends $dara.Model {
+  /**
+   * @remarks
+   * code
+   *
+   * @example
+   * 200
+   */
+  code?: string;
+  data?: GetVideoDetectShotConfigResponseBodyData;
+  /**
+   * @example
+   * 200
+   */
+  httpStatusCode?: number;
+  /**
+   * @example
+   * msg
+   */
+  message?: string;
+  /**
+   * @remarks
+   * Id of the request
+   *
+   * @example
+   * xxx
+   */
+  requestId?: string;
+  /**
+   * @remarks
+   * success
+   */
+  success?: boolean;
+  static names(): { [key: string]: string } {
+    return {
+      code: 'code',
+      data: 'data',
+      httpStatusCode: 'httpStatusCode',
+      message: 'message',
+      requestId: 'requestId',
+      success: 'success',
+    };
+  }
+
+  static types(): { [key: string]: any } {
+    return {
+      code: 'string',
+      data: GetVideoDetectShotConfigResponseBodyData,
+      httpStatusCode: 'number',
+      message: 'string',
+      requestId: 'string',
+      success: 'boolean',
+    };
+  }
+
+  validate() {
+    if(this.data && typeof (this.data as any).validate === 'function') {
+      (this.data as any).validate();
+    }
+    super.validate();
+  }
+
+  constructor(map?: { [key: string]: any }) {
+    super(map);
+  }
+}
+
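The remaining new model files in the list above follow the same generated `$dara.Model` shape: `names()` maps TypeScript properties to wire names, `types()` drives (de)serialization, and `validate()` recurses into nested models. A small sketch of working with the config response body directly, assuming the usual darabonba `toMap()` helper and the dist import path shown in the file list.

```ts
import {
  GetVideoDetectShotConfigResponseBody,
  GetVideoDetectShotConfigResponseBodyData,
} from '@alicloud/quanmiaolightapp20240801/dist/models/GetVideoDetectShotConfigResponseBody';

const body = new GetVideoDetectShotConfigResponseBody({
  code: '200',
  httpStatusCode: 200,
  success: true,
  data: new GetVideoDetectShotConfigResponseBodyData({ asyncConcurrency: 2 }),
});

body.validate();            // also runs data.validate() via the generated nested check
console.log(body.toMap());  // toMap() is inherited from $dara.Model in @darabonba/typescript
```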