alibabacloud-quanmiaolightapp20240801 2.12.0__py3-none-any.whl → 2.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
- alibabacloud_quanmiaolightapp20240801/client.py +774 -0
- alibabacloud_quanmiaolightapp20240801/models.py +6222 -4345
- {alibabacloud_quanmiaolightapp20240801-2.12.0.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.0.dist-info}/METADATA +1 -1
- alibabacloud_quanmiaolightapp20240801-2.13.0.dist-info/RECORD +8 -0
- alibabacloud_quanmiaolightapp20240801-2.12.0.dist-info/RECORD +0 -8
- {alibabacloud_quanmiaolightapp20240801-2.12.0.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.0.dist-info}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.12.0.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.0.dist-info}/WHEEL +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.12.0.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.0.dist-info}/top_level.txt +0 -0
```diff
@@ -1143,6 +1143,204 @@ class Client(OpenApiClient):
         headers = {}
         return await self.get_video_analysis_task_with_options_async(workspace_id, request, headers, runtime)
 
+    def get_video_detect_shot_config_with_options(
+        self,
+        workspace_id: str,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-获取配置
+
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: GetVideoDetectShotConfigResponse
+        """
+        req = open_api_models.OpenApiRequest(
+            headers=headers
+        )
+        params = open_api_models.Params(
+            action='GetVideoDetectShotConfig',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/videoAnalysis/getVideoDetectShotConfig',
+            method='GET',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='json',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.GetVideoDetectShotConfigResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def get_video_detect_shot_config_with_options_async(
+        self,
+        workspace_id: str,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-获取配置
+
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: GetVideoDetectShotConfigResponse
+        """
+        req = open_api_models.OpenApiRequest(
+            headers=headers
+        )
+        params = open_api_models.Params(
+            action='GetVideoDetectShotConfig',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/videoAnalysis/getVideoDetectShotConfig',
+            method='GET',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='json',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.GetVideoDetectShotConfigResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def get_video_detect_shot_config(
+        self,
+        workspace_id: str,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-获取配置
+
+        @return: GetVideoDetectShotConfigResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.get_video_detect_shot_config_with_options(workspace_id, headers, runtime)
+
+    async def get_video_detect_shot_config_async(
+        self,
+        workspace_id: str,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-获取配置
+
+        @return: GetVideoDetectShotConfigResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.get_video_detect_shot_config_with_options_async(workspace_id, headers, runtime)
+
+    def get_video_detect_shot_task_with_options(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GetVideoDetectShotTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-获取视频拆条异步任务结果
+
+        @param request: GetVideoDetectShotTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: GetVideoDetectShotTaskResponse
+        """
+        UtilClient.validate_model(request)
+        query = {}
+        if not UtilClient.is_unset(request.task_id):
+            query['taskId'] = request.task_id
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            query=OpenApiUtilClient.query(query)
+        )
+        params = open_api_models.Params(
+            action='GetVideoDetectShotTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/getVideoDetectShotTask',
+            method='GET',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='json',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.GetVideoDetectShotTaskResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def get_video_detect_shot_task_with_options_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GetVideoDetectShotTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-获取视频拆条异步任务结果
+
+        @param request: GetVideoDetectShotTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: GetVideoDetectShotTaskResponse
+        """
+        UtilClient.validate_model(request)
+        query = {}
+        if not UtilClient.is_unset(request.task_id):
+            query['taskId'] = request.task_id
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            query=OpenApiUtilClient.query(query)
+        )
+        params = open_api_models.Params(
+            action='GetVideoDetectShotTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/getVideoDetectShotTask',
+            method='GET',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='json',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.GetVideoDetectShotTaskResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def get_video_detect_shot_task(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GetVideoDetectShotTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-获取视频拆条异步任务结果
+
+        @param request: GetVideoDetectShotTaskRequest
+        @return: GetVideoDetectShotTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.get_video_detect_shot_task_with_options(workspace_id, request, headers, runtime)
+
+    async def get_video_detect_shot_task_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GetVideoDetectShotTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.GetVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-获取视频拆条异步任务结果
+
+        @param request: GetVideoDetectShotTaskRequest
+        @return: GetVideoDetectShotTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.get_video_detect_shot_task_with_options_async(workspace_id, request, headers, runtime)
+
     def hot_news_recommend_with_options(
         self,
         workspace_id: str,
```
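The new GetVideoDetectShotConfig*/GetVideoDetectShotTask* methods above follow the SDK's usual sync/async/with-options pattern: a GET against the workspace-scoped configuration endpoint and a GET that polls an asynchronous shot-detection (智能拆条, "intelligent clip splitting") task by taskId. A minimal usage sketch follows; the client construction, endpoint value, and placeholder IDs are assumptions based on common alibabacloud SDK conventions and are not part of this diff.

```python
# Minimal sketch (not taken from the diff): call the two new read operations.
# Endpoint, credentials, and IDs below are placeholders/assumptions.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_quanmiaolightapp20240801.client import Client
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
)
config.endpoint = 'quanmiaolightapp.cn-beijing.aliyuncs.com'  # assumed regional endpoint
client = Client(config)
workspace_id = '<workspace-id>'

# New in 2.13.0: read the workspace's video shot-detection configuration.
cfg_resp = client.get_video_detect_shot_config(workspace_id)
print(cfg_resp.body)

# New in 2.13.0: poll an asynchronous shot-detection task by its taskId.
task_resp = client.get_video_detect_shot_task(
    workspace_id,
    quanmiao_models.GetVideoDetectShotTaskRequest(task_id='<task-id>'),
)
print(task_resp.body)
```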
```diff
@@ -3410,6 +3608,8 @@ class Client(OpenApiClient):
             body['snapshotInterval'] = request.snapshot_interval
         if not UtilClient.is_unset(request.split_interval):
             body['splitInterval'] = request.split_interval
+        if not UtilClient.is_unset(request.split_type):
+            body['splitType'] = request.split_type
         if not UtilClient.is_unset(request.task_id):
             body['taskId'] = request.task_id
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
```
```diff
@@ -3503,6 +3703,8 @@ class Client(OpenApiClient):
             body['snapshotInterval'] = request.snapshot_interval
         if not UtilClient.is_unset(request.split_interval):
             body['splitInterval'] = request.split_interval
+        if not UtilClient.is_unset(request.split_type):
+            body['splitType'] = request.split_type
         if not UtilClient.is_unset(request.task_id):
             body['taskId'] = request.task_id
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
```
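The two small hunks above make the same change to the synchronous and asynchronous RunVideoAnalysis paths: a new optional split_type request field is forwarded to the form body as splitType when it is set (the identical addition appears again below for SubmitVideoAnalysisTask). A hedged sketch, reusing the client from the earlier example; the diff only shows that the field exists, not which values the service accepts.

```python
# Sketch: setting the new split_type field on RunVideoAnalysisRequest.
# '<split-type>' is a placeholder; accepted values are not shown in this diff.
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

request = quanmiao_models.RunVideoAnalysisRequest(
    split_interval=30,          # existing field, unchanged
    split_type='<split-type>',  # new optional field in 2.13.0, sent as 'splitType'
)
response = client.run_video_analysis(workspace_id, request)
```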
```diff
@@ -3571,6 +3773,178 @@ class Client(OpenApiClient):
         headers = {}
         return await self.run_video_analysis_with_options_async(workspace_id, request, headers, runtime)
 
+    def run_video_detect_shot_with_options(
+        self,
+        workspace_id: str,
+        tmp_req: quan_miao_light_app_20240801_models.RunVideoDetectShotRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.RunVideoDetectShotResponse:
+        """
+        @summary 轻应用-视频拆条
+
+        @param tmp_req: RunVideoDetectShotRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: RunVideoDetectShotResponse
+        """
+        UtilClient.validate_model(tmp_req)
+        request = quan_miao_light_app_20240801_models.RunVideoDetectShotShrinkRequest()
+        OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.options):
+            request.options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.options, 'options', 'json')
+        if not UtilClient.is_unset(tmp_req.recognition_options):
+            request.recognition_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.recognition_options, 'recognitionOptions', 'json')
+        body = {}
+        if not UtilClient.is_unset(request.intelli_simp_prompt):
+            body['intelliSimpPrompt'] = request.intelli_simp_prompt
+        if not UtilClient.is_unset(request.intelli_simp_prompt_template_id):
+            body['intelliSimpPromptTemplateId'] = request.intelli_simp_prompt_template_id
+        if not UtilClient.is_unset(request.language):
+            body['language'] = request.language
+        if not UtilClient.is_unset(request.model_custom_prompt_template_id):
+            body['modelCustomPromptTemplateId'] = request.model_custom_prompt_template_id
+        if not UtilClient.is_unset(request.model_id):
+            body['modelId'] = request.model_id
+        if not UtilClient.is_unset(request.model_vl_custom_prompt_template_id):
+            body['modelVlCustomPromptTemplateId'] = request.model_vl_custom_prompt_template_id
+        if not UtilClient.is_unset(request.options_shrink):
+            body['options'] = request.options_shrink
+        if not UtilClient.is_unset(request.original_session_id):
+            body['originalSessionId'] = request.original_session_id
+        if not UtilClient.is_unset(request.pre_model_id):
+            body['preModelId'] = request.pre_model_id
+        if not UtilClient.is_unset(request.prompt):
+            body['prompt'] = request.prompt
+        if not UtilClient.is_unset(request.recognition_options_shrink):
+            body['recognitionOptions'] = request.recognition_options_shrink
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.video_url):
+            body['videoUrl'] = request.video_url
+        if not UtilClient.is_unset(request.vl_prompt):
+            body['vlPrompt'] = request.vl_prompt
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='RunVideoDetectShot',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/runVideoDetectShot',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.RunVideoDetectShotResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def run_video_detect_shot_with_options_async(
+        self,
+        workspace_id: str,
+        tmp_req: quan_miao_light_app_20240801_models.RunVideoDetectShotRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.RunVideoDetectShotResponse:
+        """
+        @summary 轻应用-视频拆条
+
+        @param tmp_req: RunVideoDetectShotRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: RunVideoDetectShotResponse
+        """
+        UtilClient.validate_model(tmp_req)
+        request = quan_miao_light_app_20240801_models.RunVideoDetectShotShrinkRequest()
+        OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.options):
+            request.options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.options, 'options', 'json')
+        if not UtilClient.is_unset(tmp_req.recognition_options):
+            request.recognition_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.recognition_options, 'recognitionOptions', 'json')
+        body = {}
+        if not UtilClient.is_unset(request.intelli_simp_prompt):
+            body['intelliSimpPrompt'] = request.intelli_simp_prompt
+        if not UtilClient.is_unset(request.intelli_simp_prompt_template_id):
+            body['intelliSimpPromptTemplateId'] = request.intelli_simp_prompt_template_id
+        if not UtilClient.is_unset(request.language):
+            body['language'] = request.language
+        if not UtilClient.is_unset(request.model_custom_prompt_template_id):
+            body['modelCustomPromptTemplateId'] = request.model_custom_prompt_template_id
+        if not UtilClient.is_unset(request.model_id):
+            body['modelId'] = request.model_id
+        if not UtilClient.is_unset(request.model_vl_custom_prompt_template_id):
+            body['modelVlCustomPromptTemplateId'] = request.model_vl_custom_prompt_template_id
+        if not UtilClient.is_unset(request.options_shrink):
+            body['options'] = request.options_shrink
+        if not UtilClient.is_unset(request.original_session_id):
+            body['originalSessionId'] = request.original_session_id
+        if not UtilClient.is_unset(request.pre_model_id):
+            body['preModelId'] = request.pre_model_id
+        if not UtilClient.is_unset(request.prompt):
+            body['prompt'] = request.prompt
+        if not UtilClient.is_unset(request.recognition_options_shrink):
+            body['recognitionOptions'] = request.recognition_options_shrink
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.video_url):
+            body['videoUrl'] = request.video_url
+        if not UtilClient.is_unset(request.vl_prompt):
+            body['vlPrompt'] = request.vl_prompt
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='RunVideoDetectShot',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/runVideoDetectShot',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.RunVideoDetectShotResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def run_video_detect_shot(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.RunVideoDetectShotRequest,
+    ) -> quan_miao_light_app_20240801_models.RunVideoDetectShotResponse:
+        """
+        @summary 轻应用-视频拆条
+
+        @param request: RunVideoDetectShotRequest
+        @return: RunVideoDetectShotResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.run_video_detect_shot_with_options(workspace_id, request, headers, runtime)
+
+    async def run_video_detect_shot_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.RunVideoDetectShotRequest,
+    ) -> quan_miao_light_app_20240801_models.RunVideoDetectShotResponse:
+        """
+        @summary 轻应用-视频拆条
+
+        @param request: RunVideoDetectShotRequest
+        @return: RunVideoDetectShotResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.run_video_detect_shot_with_options_async(workspace_id, request, headers, runtime)
+
     def submit_enterprise_voc_analysis_task_with_options(
         self,
         workspace_id: str,
```
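run_video_detect_shot and its variants above wrap a synchronous POST to .../lightapp/runVideoDetectShot (轻应用-视频拆条, the light-app video clip-splitting operation); list-valued fields such as options and recognitionOptions are JSON-serialized into the form body. A hedged invocation sketch follows, again assuming the client from the first example; field values are placeholders and only the field names come from this diff.

```python
# Sketch: synchronous RunVideoDetectShot call. All values are placeholders.
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

request = quanmiao_models.RunVideoDetectShotRequest(
    video_url='https://example.com/input.mp4',
    model_id='<model-id>',   # optional; skipped when unset, like every field here
    language='<language>',   # optional
    options=['<option>'],    # list; serialized to the 'options' form field as JSON
)
response = client.run_video_detect_shot(workspace_id, request)
print(response.body)
```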
```diff
@@ -4082,6 +4456,8 @@ class Client(OpenApiClient):
             body['snapshotInterval'] = request.snapshot_interval
         if not UtilClient.is_unset(request.split_interval):
             body['splitInterval'] = request.split_interval
+        if not UtilClient.is_unset(request.split_type):
+            body['splitType'] = request.split_type
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
         if not UtilClient.is_unset(request.video_caption_info_shrink):
```
```diff
@@ -4173,6 +4549,8 @@ class Client(OpenApiClient):
             body['snapshotInterval'] = request.snapshot_interval
         if not UtilClient.is_unset(request.split_interval):
             body['splitInterval'] = request.split_interval
+        if not UtilClient.is_unset(request.split_type):
+            body['splitType'] = request.split_type
         if not UtilClient.is_unset(request.text_process_tasks_shrink):
             body['textProcessTasks'] = request.text_process_tasks_shrink
         if not UtilClient.is_unset(request.video_caption_info_shrink):
```
```diff
@@ -4239,6 +4617,182 @@ class Client(OpenApiClient):
         headers = {}
         return await self.submit_video_analysis_task_with_options_async(workspace_id, request, headers, runtime)
 
+    def submit_video_detect_shot_task_with_options(
+        self,
+        workspace_id: str,
+        tmp_req: quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-提交视频拆条任务
+
+        @param tmp_req: SubmitVideoDetectShotTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: SubmitVideoDetectShotTaskResponse
+        """
+        UtilClient.validate_model(tmp_req)
+        request = quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskShrinkRequest()
+        OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.options):
+            request.options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.options, 'options', 'json')
+        if not UtilClient.is_unset(tmp_req.recognition_options):
+            request.recognition_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.recognition_options, 'recognitionOptions', 'json')
+        body = {}
+        if not UtilClient.is_unset(request.deduplication_id):
+            body['deduplicationId'] = request.deduplication_id
+        if not UtilClient.is_unset(request.intelli_simp_prompt):
+            body['intelliSimpPrompt'] = request.intelli_simp_prompt
+        if not UtilClient.is_unset(request.intelli_simp_prompt_template_id):
+            body['intelliSimpPromptTemplateId'] = request.intelli_simp_prompt_template_id
+        if not UtilClient.is_unset(request.language):
+            body['language'] = request.language
+        if not UtilClient.is_unset(request.model_custom_prompt_template_id):
+            body['modelCustomPromptTemplateId'] = request.model_custom_prompt_template_id
+        if not UtilClient.is_unset(request.model_id):
+            body['modelId'] = request.model_id
+        if not UtilClient.is_unset(request.model_vl_custom_prompt_template_id):
+            body['modelVlCustomPromptTemplateId'] = request.model_vl_custom_prompt_template_id
+        if not UtilClient.is_unset(request.options_shrink):
+            body['options'] = request.options_shrink
+        if not UtilClient.is_unset(request.original_session_id):
+            body['originalSessionId'] = request.original_session_id
+        if not UtilClient.is_unset(request.pre_model_id):
+            body['preModelId'] = request.pre_model_id
+        if not UtilClient.is_unset(request.prompt):
+            body['prompt'] = request.prompt
+        if not UtilClient.is_unset(request.recognition_options_shrink):
+            body['recognitionOptions'] = request.recognition_options_shrink
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.video_url):
+            body['videoUrl'] = request.video_url
+        if not UtilClient.is_unset(request.vl_prompt):
+            body['vlPrompt'] = request.vl_prompt
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='SubmitVideoDetectShotTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/submitVideoDetectShotTask',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def submit_video_detect_shot_task_with_options_async(
+        self,
+        workspace_id: str,
+        tmp_req: quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-提交视频拆条任务
+
+        @param tmp_req: SubmitVideoDetectShotTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: SubmitVideoDetectShotTaskResponse
+        """
+        UtilClient.validate_model(tmp_req)
+        request = quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskShrinkRequest()
+        OpenApiUtilClient.convert(tmp_req, request)
+        if not UtilClient.is_unset(tmp_req.options):
+            request.options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.options, 'options', 'json')
+        if not UtilClient.is_unset(tmp_req.recognition_options):
+            request.recognition_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.recognition_options, 'recognitionOptions', 'json')
+        body = {}
+        if not UtilClient.is_unset(request.deduplication_id):
+            body['deduplicationId'] = request.deduplication_id
+        if not UtilClient.is_unset(request.intelli_simp_prompt):
+            body['intelliSimpPrompt'] = request.intelli_simp_prompt
+        if not UtilClient.is_unset(request.intelli_simp_prompt_template_id):
+            body['intelliSimpPromptTemplateId'] = request.intelli_simp_prompt_template_id
+        if not UtilClient.is_unset(request.language):
+            body['language'] = request.language
+        if not UtilClient.is_unset(request.model_custom_prompt_template_id):
+            body['modelCustomPromptTemplateId'] = request.model_custom_prompt_template_id
+        if not UtilClient.is_unset(request.model_id):
+            body['modelId'] = request.model_id
+        if not UtilClient.is_unset(request.model_vl_custom_prompt_template_id):
+            body['modelVlCustomPromptTemplateId'] = request.model_vl_custom_prompt_template_id
+        if not UtilClient.is_unset(request.options_shrink):
+            body['options'] = request.options_shrink
+        if not UtilClient.is_unset(request.original_session_id):
+            body['originalSessionId'] = request.original_session_id
+        if not UtilClient.is_unset(request.pre_model_id):
+            body['preModelId'] = request.pre_model_id
+        if not UtilClient.is_unset(request.prompt):
+            body['prompt'] = request.prompt
+        if not UtilClient.is_unset(request.recognition_options_shrink):
+            body['recognitionOptions'] = request.recognition_options_shrink
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.video_url):
+            body['videoUrl'] = request.video_url
+        if not UtilClient.is_unset(request.vl_prompt):
+            body['vlPrompt'] = request.vl_prompt
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='SubmitVideoDetectShotTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/submitVideoDetectShotTask',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def submit_video_detect_shot_task(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-提交视频拆条任务
+
+        @param request: SubmitVideoDetectShotTaskRequest
+        @return: SubmitVideoDetectShotTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.submit_video_detect_shot_task_with_options(workspace_id, request, headers, runtime)
+
+    async def submit_video_detect_shot_task_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.SubmitVideoDetectShotTaskResponse:
+        """
+        @summary 轻应用-提交视频拆条任务
+
+        @param request: SubmitVideoDetectShotTaskRequest
+        @return: SubmitVideoDetectShotTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.submit_video_detect_shot_task_with_options_async(workspace_id, request, headers, runtime)
+
     def update_video_analysis_config_with_options(
         self,
         workspace_id: str,
```
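SubmitVideoDetectShotTask is the asynchronous counterpart of RunVideoDetectShot: it accepts the same fields plus a deduplicationId, and its result is later fetched with the GetVideoDetectShotTask method added in the first hunk. A hedged submit-then-poll sketch follows; the response attribute carrying the task id and the task status fields are defined in models.py rather than in these hunks, so they are left as placeholders.

```python
# Sketch: submit an async shot-detection task, then poll for its result.
# Placeholder values throughout; the polling cadence is an arbitrary choice.
import time

from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

submit_resp = client.submit_video_detect_shot_task(
    workspace_id,
    quanmiao_models.SubmitVideoDetectShotTaskRequest(
        video_url='https://example.com/input.mp4',
        deduplication_id='<dedup-id>',  # optional; only present on the submit path
    ),
)
task_id = '<task-id-from-submit-response>'  # exact response shape lives in models.py

for _ in range(30):
    poll_resp = client.get_video_detect_shot_task(
        workspace_id,
        quanmiao_models.GetVideoDetectShotTaskRequest(task_id=task_id),
    )
    print(poll_resp.body)  # inspect the task status here; field names are in models.py
    time.sleep(10)
```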
```diff
@@ -4578,3 +5132,223 @@ class Client(OpenApiClient):
         runtime = util_models.RuntimeOptions()
         headers = {}
         return await self.update_video_analysis_tasks_with_options_async(workspace_id, request, headers, runtime)
+
+    def update_video_detect_shot_config_with_options(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-更新配置
+
+        @param request: UpdateVideoDetectShotConfigRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: UpdateVideoDetectShotConfigResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.async_concurrency):
+            body['asyncConcurrency'] = request.async_concurrency
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='UpdateVideoDetectShotConfig',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/videoAnalysis/updateVideoDetectShotConfig',
+            method='PUT',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def update_video_detect_shot_config_with_options_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-更新配置
+
+        @param request: UpdateVideoDetectShotConfigRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: UpdateVideoDetectShotConfigResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.async_concurrency):
+            body['asyncConcurrency'] = request.async_concurrency
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='UpdateVideoDetectShotConfig',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/videoAnalysis/updateVideoDetectShotConfig',
+            method='PUT',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def update_video_detect_shot_config(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigRequest,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-更新配置
+
+        @param request: UpdateVideoDetectShotConfigRequest
+        @return: UpdateVideoDetectShotConfigResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.update_video_detect_shot_config_with_options(workspace_id, request, headers, runtime)
+
+    async def update_video_detect_shot_config_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigRequest,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotConfigResponse:
+        """
+        @summary 智能拆条-更新配置
+
+        @param request: UpdateVideoDetectShotConfigRequest
+        @return: UpdateVideoDetectShotConfigResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.update_video_detect_shot_config_with_options_async(workspace_id, request, headers, runtime)
+
+    def update_video_detect_shot_task_with_options(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskResponse:
+        """
+        @summary 视频拆条-修改任务状态
+
+        @param request: UpdateVideoDetectShotTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: UpdateVideoDetectShotTaskResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.task_status):
+            body['taskStatus'] = request.task_status
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='UpdateVideoDetectShotTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/updateVideoDetectShotTask',
+            method='PUT',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def update_video_detect_shot_task_with_options_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskResponse:
+        """
+        @summary 视频拆条-修改任务状态
+
+        @param request: UpdateVideoDetectShotTaskRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: UpdateVideoDetectShotTaskResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.task_id):
+            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.task_status):
+            body['taskStatus'] = request.task_status
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='UpdateVideoDetectShotTask',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/updateVideoDetectShotTask',
+            method='PUT',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def update_video_detect_shot_task(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskResponse:
+        """
+        @summary 视频拆条-修改任务状态
+
+        @param request: UpdateVideoDetectShotTaskRequest
+        @return: UpdateVideoDetectShotTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.update_video_detect_shot_task_with_options(workspace_id, request, headers, runtime)
+
+    async def update_video_detect_shot_task_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskRequest,
+    ) -> quan_miao_light_app_20240801_models.UpdateVideoDetectShotTaskResponse:
+        """
+        @summary 视频拆条-修改任务状态
+
+        @param request: UpdateVideoDetectShotTaskRequest
+        @return: UpdateVideoDetectShotTaskResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.update_video_detect_shot_task_with_options_async(workspace_id, request, headers, runtime)
```
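The last hunk adds the write operations: UpdateVideoDetectShotConfig (PUT, 智能拆条-更新配置, update the workspace's shot-detection configuration, currently just asyncConcurrency) and UpdateVideoDetectShotTask (PUT, 视频拆条-修改任务状态, change a task's status). A hedged sketch follows, reusing the client from the first example; the valid concurrency range and the allowed taskStatus values are not visible in this diff.

```python
# Sketch: the two new update operations. Values are placeholders.
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

# Adjust how many shot-detection tasks may run concurrently in the workspace.
client.update_video_detect_shot_config(
    workspace_id,
    quanmiao_models.UpdateVideoDetectShotConfigRequest(async_concurrency=2),
)

# Change the status of an existing shot-detection task.
client.update_video_detect_shot_task(
    workspace_id,
    quanmiao_models.UpdateVideoDetectShotTaskRequest(
        task_id='<task-id>',
        task_status='<status>',  # allowed values are not shown in this diff
    ),
)
```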